author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2020-06-04 10:48:41 +0200
committer GitHub <noreply@github.com>  2020-06-04 10:48:41 +0200
commit    d6e70c6df9ab9bf768efa69ba29601b64721ffd1 (patch)
tree      c7286b7f1572e01104737beb13b7a0fccbd18284
parent    1f8c509fafb4ce41970c4f28ed55daec459c7520 (diff)
parent    545aef7d9bd48b268222540ef7c5ffbf8b02ea6e (diff)
Merge pull request #6487 from rhatdan/VENDOR
Vendor in container/storage v1.20.2
-rw-r--r--  contrib/gate/Dockerfile | 2
-rw-r--r--  go.mod | 2
-rw-r--r--  go.sum | 5
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 3
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 10
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 28
-rw-r--r--  vendor/github.com/containers/storage/pkg/config/config.go | 10
-rw-r--r--  vendor/github.com/containers/storage/storage.conf | 3
-rw-r--r--  vendor/github.com/containers/storage/store.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 166
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 14
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 34
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bytereader.go | 9
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 34
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/dict.go | 104
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go | 15
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go | 21
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 23
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/history.go | 18
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go | 144
-rw-r--r--  vendor/modules.txt | 4
25 files changed, 492 insertions(+), 172 deletions(-)
diff --git a/contrib/gate/Dockerfile b/contrib/gate/Dockerfile
index f86709b00..657b5accf 100644
--- a/contrib/gate/Dockerfile
+++ b/contrib/gate/Dockerfile
@@ -13,7 +13,7 @@ COPY . $GOSRC
# Note: adding conmon and crun so podman command checks will work
RUN dnf -y install \
$(grep "^[^#]" $GOSRC/contrib/dependencies.txt) diffutils containers-common fuse-overlayfs conmon crun runc --exclude container-selinux; \
- sed -i -e 's|^#mount_program|mount_program|g' /etc/containers/storage.conf \
+ sed -i -e 's|^#mount_program|mount_program|g' -e 's/# size.*/skip_mount_home = "true"/g' /etc/containers/storage.conf \
&& dnf clean all
# Install dependencies
diff --git a/go.mod b/go.mod
index 617f118f1..6955dac5b 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
github.com/containers/conmon v2.0.16+incompatible
github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0
github.com/containers/psgo v1.5.1
- github.com/containers/storage v1.20.1
+ github.com/containers/storage v1.20.2
github.com/coreos/go-systemd/v22 v22.1.0
github.com/cri-o/ocicni v0.2.0
github.com/cyphar/filepath-securejoin v0.2.2
diff --git a/go.sum b/go.sum
index 38fdfe902..1ceb80ebe 100644
--- a/go.sum
+++ b/go.sum
@@ -89,6 +89,8 @@ github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSL
github.com/containers/storage v1.19.2/go.mod h1:gYCp3jzgXkvubO0rI14QAjz5Mxm/qKJgLmHFyqayDnw=
github.com/containers/storage v1.20.1 h1:2XE4eRIqSa6YjhAZjNwIkIKE6+Miy+5WV8l1KzY2ZKk=
github.com/containers/storage v1.20.1/go.mod h1:RoKzO8KSDogCT6c06rEbanZTcKYxshorB33JikEGc3A=
+github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
+github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -257,6 +259,8 @@ github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqj
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.6 h1:SP6zavvTG3YjOosWePXFDlExpKIWMTO4SE/Y8MZB2vI=
github.com/klauspost/compress v1.10.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -560,6 +564,7 @@ golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 0044d6cb9..769e37e15 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.20.1
+1.20.2
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index f1c941f11..ff6e297f4 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -1544,8 +1544,8 @@ func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
}
dev := stat.Rdev
- majorNum := major(dev)
- minorNum := minor(dev)
+ majorNum := major(uint64(dev))
+ minorNum := minor(uint64(dev))
logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum)
return majorNum, minorNum, nil
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 2906e3e08..930a57a97 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -311,6 +311,9 @@ func parseOptions(options []string) (*overlayOptions, error) {
return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err)
}
o.mountProgram = val
+ case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home":
+ logrus.Debugf("overlay: skip_mount_home=%s", val)
+ o.skipMountHome, err = strconv.ParseBool(val)
case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors":
logrus.Debugf("overlay: ignore_chown_errors=%s", val)
o.ignoreChownErrors, err = strconv.ParseBool(val)
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index a7d9ade60..01ac1827b 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -6,23 +6,23 @@ require (
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.0.0
- github.com/klauspost/compress v1.10.5
+ github.com/klauspost/compress v1.10.7
github.com/klauspost/pgzip v1.2.4
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.0-rc9
+ github.com/opencontainers/runc v1.0.0-rc90
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
- github.com/opencontainers/selinux v1.5.1
+ github.com/opencontainers/selinux v1.5.2
github.com/pkg/errors v0.9.1
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
github.com/sirupsen/logrus v1.6.0
- github.com/stretchr/testify v1.5.1
+ github.com/stretchr/testify v1.6.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/vbatts/tar-split v0.11.1
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
- golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2
+ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index eab0fd61e..2b5a279c6 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -8,6 +8,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -15,12 +16,14 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
@@ -29,6 +32,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
@@ -42,8 +46,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
-github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -62,12 +66,12 @@ github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=
+github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
-github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
+github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -81,15 +85,17 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
+github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
@@ -120,8 +126,8 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
-golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -145,6 +151,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index 1ac6c04e3..4a35997ea 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -2,8 +2,6 @@ package config
import (
"fmt"
-
- "github.com/sirupsen/logrus"
)
// ThinpoolOptionsConfig represents the "storage.options.thinpool"
@@ -269,11 +267,11 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
} else if options.Size != "" {
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
}
-
- if options.Overlay.SkipMountHome != "" || options.SkipMountHome != "" {
- logrus.Warn("skip_mount_home option is no longer supported, ignoring option")
+ if options.Overlay.SkipMountHome != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome))
+ } else if options.SkipMountHome != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome))
}
-
case "vfs":
if options.Vfs.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index c7f9b2cf8..19909e9c6 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -76,6 +76,9 @@ additionalimagestores = [
# mountopt specifies comma separated list of extra mount options
mountopt = "nodev"
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
# Size is used to set a maximum size of the container image.
# size = ""
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index eaf622f43..263f4edb2 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -3481,6 +3481,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if config.Storage.Options.MountProgram != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram))
}
+ if config.Storage.Options.SkipMountHome != "" {
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome))
+ }
if config.Storage.Options.IgnoreChownErrors != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors))
}
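
Taken together with the storage.conf and pkg/config hunks above, this change restores the skip_mount_home plumbing: a value set in the config file ends up as a `<driver>.skip_mount_home=<bool>` graph-driver option, which the overlay driver parses again. Below is a rough caller-side sketch, assuming the exported `StoreOptions`/`GetStore` API of the vendored containers/storage; the paths are placeholders, and `ReloadConfigurationFile` has the signature shown in this hunk.

```go
package storagesketch

import (
	"fmt"

	"github.com/containers/storage"
)

// OpenStore is a sketch only; root directories are placeholders.
func OpenStore() (storage.Store, error) {
	opts := storage.StoreOptions{
		RunRoot:         "/run/containers/storage",
		GraphRoot:       "/var/lib/containers/storage",
		GraphDriverName: "overlay",
	}

	// A `skip_mount_home = "true"` entry in storage.conf is turned into a
	// graph-driver option such as "overlay.skip_mount_home=true".
	storage.ReloadConfigurationFile("/etc/containers/storage.conf", &opts)
	fmt.Println("graph driver options:", opts.GraphDriverOptions)

	// The option can also be passed directly, exercising the new
	// parseOptions case in drivers/overlay/overlay.go.
	opts.GraphDriverOptions = append(opts.GraphDriverOptions, "overlay.skip_mount_home=true")

	return storage.GetStore(opts)
}
```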
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 97ae66a4a..fb42a398b 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -155,20 +155,70 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
// The length of the supplied input must match the end of a block exactly.
// Before this is called, the table must be initialized with ReadTable unless
// the encoder re-used the table.
+// deprecated: Use the stateless Decoder() to get a concurrent version.
func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
- if len(s.dt.single) == 0 {
+ if cap(s.Out) < s.MaxDecodedSize {
+ s.Out = make([]byte, s.MaxDecodedSize)
+ }
+ s.Out = s.Out[:0:s.MaxDecodedSize]
+ s.Out, err = s.Decoder().Decompress1X(s.Out, in)
+ return s.Out, err
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// The length of the supplied input must match the end of a block exactly.
+// The destination size of the uncompressed data must be known and provided.
+// deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+ if dstSize > s.MaxDecodedSize {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ if cap(s.Out) < dstSize {
+ s.Out = make([]byte, s.MaxDecodedSize)
+ }
+ s.Out = s.Out[:0:dstSize]
+ s.Out, err = s.Decoder().Decompress4X(s.Out, in)
+ return s.Out, err
+}
+
+// Decoder will return a stateless decoder that can be used by multiple
+// decompressors concurrently.
+// Before this is called, the table must be initialized with ReadTable.
+// The Decoder is still linked to the scratch buffer so that cannot be reused.
+// However, it is safe to discard the scratch.
+func (s *Scratch) Decoder() *Decoder {
+ return &Decoder{
+ dt: s.dt,
+ actualTableLog: s.actualTableLog,
+ }
+}
+
+// Decoder provides stateless decoding.
+type Decoder struct {
+ dt dTable
+ actualTableLog uint8
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
var br bitReader
- err = br.init(in)
+ err := br.init(src)
if err != nil {
- return nil, err
+ return dst, err
}
- s.Out = s.Out[:0]
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
decode := func() byte {
- val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
- v := s.dt.single[val]
+ val := br.peekBitsFast(d.actualTableLog) /* note : actualTableLog >= 1 */
+ v := d.dt.single[val]
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}
@@ -180,88 +230,80 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
- dt := s.dt.single[:tlSize]
+ dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var tmp = s.huffWeight[:256]
+ var buf [256]byte
var off uint8
for br.off >= 8 {
br.fillFast()
- tmp[off+0] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
- tmp[off+1] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+ buf[off+0] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask])
+ buf[off+1] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask])
br.fillFast()
- tmp[off+2] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
- tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+ buf[off+2] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask])
+ buf[off+3] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask])
off += 4
if off == 0 {
- if len(s.Out)+256 > s.MaxDecodedSize {
+ if len(dst)+256 > maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
- s.Out = append(s.Out, tmp...)
+ dst = append(dst, buf[:]...)
}
}
- if len(s.Out)+int(off) > s.MaxDecodedSize {
+ if len(dst)+int(off) > maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
- s.Out = append(s.Out, tmp[:off]...)
+ dst = append(dst, buf[:off]...)
for !br.finished() {
br.fill()
- if len(s.Out) >= s.MaxDecodedSize {
+ if len(dst) >= maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
- s.Out = append(s.Out, decode())
+ dst = append(dst, decode())
}
- return s.Out, br.close()
+ return dst, br.close()
}
// Decompress4X will decompress a 4X encoded stream.
-// Before this is called, the table must be initialized with ReadTable unless
-// the encoder re-used the table.
// The length of the supplied input must match the end of a block exactly.
-// The destination size of the uncompressed data must be known and provided.
-func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (s *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(s.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
- if len(in) < 6+(4*1) {
+ if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
- if dstSize > s.MaxDecodedSize {
- return nil, ErrMaxDecodedSizeExceeded
- }
- // TODO: We do not detect when we overrun a buffer, except if the last one does.
var br [4]bitReader
start := 6
for i := 0; i < 3; i++ {
- length := int(in[i*2]) | (int(in[i*2+1]) << 8)
- if start+length >= len(in) {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
- err = br[i].init(in[start : start+length])
+ err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
- err = br[3].init(in[start:])
+ err := br[3].init(src[start:])
if err != nil {
return nil, err
}
- // Prepare output
- if cap(s.Out) < dstSize {
- s.Out = make([]byte, 0, dstSize)
- }
- s.Out = s.Out[:dstSize]
// destination, offset to match first output
- dstOut := s.Out
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
@@ -276,7 +318,7 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
}
// Use temp table to avoid bound checks/append penalty.
- var tmp = s.huffWeight[:256]
+ var buf [256]byte
var off uint8
var decoded int
@@ -300,8 +342,8 @@ bigloop:
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
- tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
- tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ buf[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
@@ -313,8 +355,8 @@ bigloop:
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
- tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
- tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ buf[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
@@ -326,8 +368,8 @@ bigloop:
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
- tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
- tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ buf[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
@@ -339,8 +381,8 @@ bigloop:
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
- tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
- tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+ buf[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
@@ -350,30 +392,30 @@ bigloop:
if bufoff > dstEvery {
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(dstOut, tmp[:bufoff])
- copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2])
- copy(dstOut[dstEvery*2:], tmp[bufoff*2:bufoff*3])
- copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4])
+ copy(out, buf[:bufoff])
+ copy(out[dstEvery:], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
off = 0
- dstOut = dstOut[bufoff:]
+ out = out[bufoff:]
decoded += 256
// There must at least be 3 buffers left.
- if len(dstOut) < dstEvery*3 {
+ if len(out) < dstEvery*3 {
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
- if len(dstOut) < dstEvery*3+ioff {
+ if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(dstOut, tmp[:off])
- copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
- copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3])
- copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4])
+ copy(out, buf[:off])
+ copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
decoded += int(off) * 4
- dstOut = dstOut[off:]
+ out = out[off:]
}
// Decode remaining.
@@ -382,10 +424,10 @@ bigloop:
br := &br[i]
for !br.finished() {
br.fill()
- if offset >= len(dstOut) {
+ if offset >= len(out) {
return nil, errors.New("corruption detected: stream overrun 4")
}
- dstOut[offset] = decode(br)
+ out[offset] = decode(br)
offset++
}
decoded += offset - dstEvery*i
@@ -397,7 +439,7 @@ bigloop:
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
- return s.Out, nil
+ return dst, nil
}
// matches will compare a decoding table to a coding table.
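
The rewrite above deprecates the stateful `Scratch.Decompress1X`/`Decompress4X` entry points in favour of a stateless `Decoder` that can be shared across goroutines. A usage sketch based only on the signatures visible in this hunk; the input layout and size bound are assumptions for illustration.

```go
package huff0sketch

import "github.com/klauspost/compress/huff0"

// decode1X assumes tableAndData holds a huff0 table followed by a
// single-stream (1X) compressed block, and maxSize is a known upper bound
// on the decoded size.
func decode1X(tableAndData []byte, maxSize int) ([]byte, error) {
	s, data, err := huff0.ReadTable(tableAndData, nil)
	if err != nil {
		return nil, err
	}
	// Decoder() is the new stateless API; it is safe for concurrent use.
	dec := s.Decoder()
	// cap(dst) now plays the role the old Scratch.MaxDecodedSize field did.
	return dec.Decompress1X(make([]byte, 0, maxSize), data)
}
```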
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index bc977a302..f2a80b5d0 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -309,6 +309,20 @@ The decoder can be used for *concurrent* decompression of multiple buffers.
It will only allow a certain number of concurrent operations to run.
To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder.
+To add a dictionary use the `RegisterDict(data)` with the dictionary data before starting any decompression.
+
+The dictionary will be used automatically for the data that specifies them.
+
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering a dictionary with the same ID it will override the existing.
+
### Allocation-less operation
The decoder has been designed to operate without allocations after a warmup.
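
The `RegisterDict` call referenced in this README addition is introduced in `decoder.go` later in this diff. A hedged sketch of how a caller might wire it up, assuming a nil reader is acceptable when only `DecodeAll` is used; the dictionary path is a placeholder.

```go
package zstddictsketch

import (
	"io/ioutil"

	"github.com/klauspost/compress/zstd"
)

// DecodeWithDict decompresses a buffer whose frames may reference the
// dictionary stored at dictPath (e.g. one produced by `zstd --train`).
func DecodeWithDict(compressed []byte, dictPath string) ([]byte, error) {
	dictData, err := ioutil.ReadFile(dictPath)
	if err != nil {
		return nil, err
	}

	dec, err := zstd.NewReader(nil) // assumption: nil reader is fine for DecodeAll-only use
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	// Register the dictionary before decoding; frames carrying a matching
	// dictionary ID use it, frames without a dictionary decode as before.
	if err := dec.RegisterDict(dictData); err != nil {
		return nil, err
	}
	return dec.DecodeAll(compressed, nil)
}
```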
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 19181caea..4a14242c7 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -461,26 +461,22 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if huff == nil {
huff = &huff0.Scratch{}
}
- huff.Out = b.literalBuf[:0]
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
return err
}
// Use our out buffer.
- huff.Out = b.literalBuf[:0]
- huff.MaxDecodedSize = litRegenSize
if fourStreams {
- literals, err = huff.Decompress4X(literals, litRegenSize)
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
- literals, err = huff.Decompress1X(literals)
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
}
if err != nil {
println("decoding compressed literals:", err)
return err
}
// Make sure we don't leak our literals buffer
- huff.Out = nil
if len(literals) != litRegenSize {
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
@@ -631,15 +627,12 @@ func (b *blockDec) decodeCompressed(hist *history) error {
var err error
// Use our out buffer.
huff = hist.huffTree
- huff.Out = b.literalBuf[:0]
- huff.MaxDecodedSize = litRegenSize
if fourStreams {
- literals, err = huff.Decompress4X(literals, litRegenSize)
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
- literals, err = huff.Decompress1X(literals)
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
}
// Make sure we don't leak our literals buffer
- huff.Out = nil
if err != nil {
println("decompressing literals:", err)
return err
@@ -649,12 +642,13 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
} else {
if hist.huffTree != nil && huff != nil {
- huffDecoderPool.Put(hist.huffTree)
+ if hist.dict == nil || hist.dict.litDec != hist.huffTree {
+ huffDecoderPool.Put(hist.huffTree)
+ }
hist.huffTree = nil
}
}
if huff != nil {
- huff.Out = nil
hist.huffTree = huff
}
if debug {
@@ -687,14 +681,20 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// If only recent offsets were not transferred, this would be an obvious win.
// Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
- if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
- println("initializing sequences:", err)
- return err
- }
hbytes := hist.b
if len(hbytes) > hist.windowSize {
hbytes = hbytes[len(hbytes)-hist.windowSize:]
+ // We do not need history any more.
+ if hist.dict != nil {
+ hist.dict.content = nil
+ }
}
+
+ if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+ println("initializing sequences:", err)
+ return err
+ }
+
err = seqs.decode(nSeqs, br, hbytes)
if err != nil {
return err
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
index dc4378b64..f708df1c4 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -4,6 +4,8 @@
package zstd
+import "encoding/binary"
+
// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
@@ -55,12 +57,7 @@ func (b byteReader) Uint32() uint32 {
}
return v
}
- b2 := b.b[b.off : b.off+4 : b.off+4]
- v3 := uint32(b2[3])
- v2 := uint32(b2[2])
- v1 := uint32(b2[1])
- v0 := uint32(b2[0])
- return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+ return binary.LittleEndian.Uint32(b.b[b.off : b.off+4])
}
// unread returns the unread portion of the input.
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 324347623..8e34479ff 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -32,8 +32,9 @@ type Decoder struct {
// Current read position used for Reader functionality.
current decoderState
- // Custom dictionaries
- dicts map[uint32]struct{}
+ // Custom dictionaries.
+ // Always uses copies.
+ dicts map[uint32]dict
// streamWg is the waitgroup for all streams
streamWg sync.WaitGroup
@@ -295,10 +296,18 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = input
for {
+ frame.history.reset()
err := frame.reset(&frame.bBuf)
if err == io.EOF {
return dst, nil
}
+ if frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ return nil, ErrUnknownDictionary
+ }
+ frame.history.setDict(&dict)
+ }
if err != nil {
return dst, err
}
@@ -393,6 +402,19 @@ func (d *Decoder) Close() {
d.current.err = ErrDecoderClosed
}
+// RegisterDict will load a dictionary
+func (d *Decoder) RegisterDict(b []byte) error {
+ dc, err := loadDict(b)
+ if err != nil {
+ return err
+ }
+ if d.dicts == nil {
+ d.dicts = make(map[uint32]dict, 1)
+ }
+ d.dicts[dc.id] = *dc
+ return nil
+}
+
// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
// Any changes to the decoder will be reflected, so the returned ReadCloser
// can be reused along with the decoder.
@@ -466,6 +488,14 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
if debug && err != nil {
println("Frame decoder returned", err)
}
+ if err == nil && frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ err = ErrUnknownDictionary
+ } else {
+ frame.history.setDict(&dict)
+ }
+ }
if err != nil {
stream.output <- decodeOutput{
err: err,
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 000000000..8eb6f6ba3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,104 @@
+package zstd
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+ id uint32
+
+ litDec *huff0.Scratch
+ llDec, ofDec, mlDec sequenceDec
+ offsets [3]int
+ content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+ // Check static field size.
+ if len(b) <= 8+(3*4) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ d := dict{
+ llDec: sequenceDec{fse: &fseDecoder{}},
+ ofDec: sequenceDec{fse: &fseDecoder{}},
+ mlDec: sequenceDec{fse: &fseDecoder{}},
+ }
+ if !bytes.Equal(b[:4], dictMagic[:]) {
+ return nil, ErrMagicMismatch
+ }
+ d.id = binary.LittleEndian.Uint32(b[4:8])
+ if d.id == 0 {
+ return nil, errors.New("dictionaries cannot have ID 0")
+ }
+
+ // Read literal table
+ var err error
+ d.litDec, b, err = huff0.ReadTable(b[8:], nil)
+ if err != nil {
+ return nil, err
+ }
+
+ br := byteReader{
+ b: b,
+ off: 0,
+ }
+ readDec := func(i tableIndex, dec *fseDecoder) error {
+ if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+ return err
+ }
+ if br.overread() {
+ return io.ErrUnexpectedEOF
+ }
+ err = dec.transform(symbolTableX[i])
+ if err != nil {
+ println("Transform table error:", err)
+ return err
+ }
+ if debug {
+ println("Read table ok", "symbolLen:", dec.symbolLen)
+ }
+ // Set decoders as predefined so they aren't reused.
+ dec.preDefined = true
+ return nil
+ }
+
+ if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+ return nil, err
+ }
+ if br.remain() < 12 {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ d.offsets[0] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[1] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[2] = int(br.Uint32())
+ br.advance(4)
+ if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+ return nil, errors.New("invalid offset in dictionary")
+ }
+ d.content = make([]byte, br.remain())
+ copy(d.content, br.unread())
+ if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+ return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+ }
+
+ return &d, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 5ebead9dc..50276bcde 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -671,4 +671,8 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index d1d3658e6..4104b456c 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -383,6 +383,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
panic("src too big")
}
}
+
// Protect against e.cur wraparound.
if e.cur >= bufferReset {
for i := range e.table[:] {
@@ -516,6 +517,9 @@ encodeLoop:
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
+ if debugAsserts && t < 0 {
+ panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
+ }
break
}
@@ -548,6 +552,9 @@ encodeLoop:
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
+ if debugAsserts && t < 0 {
+ panic(fmt.Sprintf("t (%d) < 0 ", t))
+ }
// Extend the 4-byte match as long as possible.
//l := e.matchlenNoHist(s+4, t+4, src) + 4
// l := int32(matchLen(src[s+4:], src[t+4:])) + 4
@@ -647,6 +654,10 @@ encodeLoop:
if debug {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
}
func (e *fastBase) addBlock(src []byte) int32 {
@@ -714,7 +725,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
}
// Reset the encoding table.
-func (e *fastBase) Reset() {
+func (e *fastBase) Reset(singleBlock bool) {
if e.blk == nil {
e.blk = &blockEnc{}
e.blk.init()
@@ -727,7 +738,7 @@ func (e *fastBase) Reset() {
} else {
e.crc.Reset()
}
- if cap(e.hist) < int(e.maxMatchOff*2) {
+ if !singleBlock && cap(e.hist) < int(e.maxMatchOff*2) {
l := e.maxMatchOff * 2
// Make it at least 1MB.
if l < 1<<20 {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index af4f00b73..bf42bb1cf 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -35,7 +35,7 @@ type encoder interface {
AppendCRC([]byte) []byte
WindowSize(size int) int32
UseBlock(*blockEnc)
- Reset()
+ Reset(singleBlock bool)
}
type encoderState struct {
@@ -82,7 +82,10 @@ func (e *Encoder) initialize() {
}
e.encoders = make(chan encoder, e.o.concurrent)
for i := 0; i < e.o.concurrent; i++ {
- e.encoders <- e.o.encoder()
+ enc := e.o.encoder()
+ // If not single block, history will be allocated on first use.
+ enc.Reset(true)
+ e.encoders <- enc
}
}
@@ -112,7 +115,7 @@ func (e *Encoder) Reset(w io.Writer) {
s.filling = s.filling[:0]
s.current = s.current[:0]
s.previous = s.previous[:0]
- s.encoder.Reset()
+ s.encoder.Reset(false)
s.headerWritten = false
s.eofWritten = false
s.fullFrameWritten = false
@@ -445,11 +448,10 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
enc := <-e.encoders
defer func() {
// Release encoder reference to last block.
- enc.Reset()
+ // If a non-single block is needed the encoder will reset again.
+ enc.Reset(true)
e.encoders <- enc
}()
- enc.Reset()
- blk := enc.Block()
// Use single segments when above minimum window and below 1MB.
single := len(src) < 1<<20 && len(src) > MinWindowSize
if e.o.single != nil {
@@ -472,12 +474,13 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
panic(err)
}
- if len(src) <= e.o.blockSize && len(src) <= maxBlockSize {
+ // If we can do everything in one block, prefer that.
+ if len(src) <= maxCompressedBlockSize {
// Slightly faster with no history and everything in one block.
if e.o.crc {
_, _ = enc.CRC().Write(src)
}
- blk.reset(nil)
+ blk := enc.Block()
blk.last = true
enc.EncodeNoHist(blk, src)
@@ -504,6 +507,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
blk.output = oldout
} else {
+ enc.Reset(false)
+ blk := enc.Block()
for len(src) > 0 {
todo := src
if len(todo) > e.o.blockSize {
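
Callers of `EncodeAll` are unaffected by the `Reset(singleBlock bool)` change; inputs that fit in a single compressed block simply take the cheaper no-history path now. A round-trip sketch using the stateless `*All` APIs, assuming nil reader/writer arguments are allowed when only `EncodeAll`/`DecodeAll` are used.

```go
package zstdencsketch

import "github.com/klauspost/compress/zstd"

// RoundTrip compresses and decompresses a payload in memory; small payloads
// now go through the single-block EncodeNoHist path inside EncodeAll.
func RoundTrip(payload []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil) // assumption: nil writer for EncodeAll-only use
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	compressed := enc.EncodeAll(payload, nil)

	dec, err := zstd.NewReader(nil) // assumption: nil reader for DecodeAll-only use
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}
```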
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 780880ebe..fc4a566d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -40,7 +40,7 @@ type frameDec struct {
FrameContentSize uint64
frameDone sync.WaitGroup
- DictionaryID uint32
+ DictionaryID *uint32
HasCheckSum bool
SingleSegment bool
@@ -142,7 +142,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
- d.DictionaryID = 0
+ d.DictionaryID = nil
if size := fhd & 3; size != 0 {
if size == 3 {
size = 4
@@ -154,19 +154,22 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return io.ErrUnexpectedEOF
}
+ var id uint32
switch size {
case 1:
- d.DictionaryID = uint32(b[0])
+ id = uint32(b[0])
case 2:
- d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+ id = uint32(b[0]) | (uint32(b[1]) << 8)
case 4:
- d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
}
if debug {
- println("Dict size", size, "ID:", d.DictionaryID)
+ println("Dict size", size, "ID:", id)
}
- if d.DictionaryID != 0 {
- return ErrUnknownDictionary
+ if id > 0 {
+ // ID 0 means "sorry, no dictionary anyway".
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+ d.DictionaryID = &id
}
}
@@ -351,8 +354,6 @@ func (d *frameDec) initAsync() {
// When the frame has finished decoding the *bufio.Reader
// containing the remaining input will be sent on frameDec.frameDone.
func (d *frameDec) startDecoder(output chan decodeOutput) {
- // TODO: Init to dictionary
- d.history.reset()
written := int64(0)
defer func() {
@@ -445,8 +446,6 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
- // TODO: Init to dictionary
- d.history.reset()
saved := d.history.b
// We use the history for output to avoid copying it.
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index e002be98b..957cfeb79 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -19,7 +19,7 @@ const (
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
- maxMemoryUsage = 11
+ maxMemoryUsage = tablelogAbsoluteMax + 2
maxTableLog = maxMemoryUsage - 2
maxTablesize = 1 << maxTableLog
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index e8c419bd5..f418f50fc 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -17,6 +17,7 @@ type history struct {
windowSize int
maxSize int
error bool
+ dict *dict
}
// reset will reset the history to initial state of a frame.
@@ -36,12 +37,27 @@ func (h *history) reset() {
}
h.decoders = sequenceDecs{}
if h.huffTree != nil {
- huffDecoderPool.Put(h.huffTree)
+ if h.dict == nil || h.dict.litDec != h.huffTree {
+ huffDecoderPool.Put(h.huffTree)
+ }
}
h.huffTree = nil
+ h.dict = nil
//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
+func (h *history) setDict(dict *dict) {
+ if dict == nil {
+ return
+ }
+ h.dict = dict
+ h.decoders.litLengths = dict.llDec
+ h.decoders.offsets = dict.ofDec
+ h.decoders.matchLengths = dict.mlDec
+ h.recentOffsets = dict.offsets
+ h.huffTree = dict.litDec
+}
+
// append bytes to history.
// This function will make sure there is space for it,
// if the buffer has been allocated with enough extra space.
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 39238e16a..7ff870400 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -62,6 +62,7 @@ type sequenceDecs struct {
matchLengths sequenceDec
prevOffset [3]int
hist []byte
+ dict []byte
literals []byte
out []byte
windowSize int
@@ -85,6 +86,10 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
s.windowSize = hist.windowSize
s.out = out
+ s.dict = nil
+ if hist.dict != nil {
+ s.dict = hist.dict.content
+ }
return nil
}
@@ -100,23 +105,78 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
printf("reading sequence %d, exceeded available data\n", seqs-i)
return io.ErrUnexpectedEOF
}
- var litLen, matchOff, matchLen int
+ var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
- litLen, matchOff, matchLen = s.nextFast(br, llState, mlState, ofState)
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
br.fillFast()
} else {
- litLen, matchOff, matchLen = s.next(br, llState, mlState, ofState)
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
- println("Seq", seqs-i-1, "Litlen:", litLen, "matchOff:", matchOff, "(abs) matchLen:", matchLen)
+ println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
- if litLen > len(s.literals) {
- return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", litLen, len(s.literals))
+ if ll > len(s.literals) {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
}
- size := litLen + matchLen + len(s.out)
+ size := ll + ml + len(s.out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", size)
}
@@ -127,52 +187,70 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
s.out = append(s.out, make([]byte, maxBlockSize)...)
s.out = s.out[:len(s.out)-maxBlockSize]
}
- if matchLen > maxMatchLen {
- return fmt.Errorf("match len (%d) bigger than max allowed length", matchLen)
- }
- if matchOff > len(s.out)+len(hist)+litLen {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", matchOff, len(s.out)+len(hist)+litLen)
- }
- if matchOff > s.windowSize {
- return fmt.Errorf("match offset (%d) bigger than window size (%d)", matchOff, s.windowSize)
- }
- if matchOff == 0 && matchLen > 0 {
- return fmt.Errorf("zero matchoff and matchlen > 0")
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
- s.out = append(s.out, s.literals[:litLen]...)
- s.literals = s.literals[litLen:]
+ // Add literals
+ s.out = append(s.out, s.literals[:ll]...)
+ s.literals = s.literals[ll:]
out := s.out
+ if mo > len(s.out)+len(hist) || mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ }
+ end := dictO + ml
+ if end > len(s.dict) {
+ out = append(out, s.dict[dictO:]...)
+ mo -= len(s.dict) - dictO
+ ml -= len(s.dict) - dictO
+ } else {
+ out = append(out, s.dict[dictO:end]...)
+ mo = 0
+ ml = 0
+ }
+ }
+
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+
// Copy from history.
// TODO: Blocks without history could be made to ignore this completely.
- if v := matchOff - len(s.out); v > 0 {
+ if v := mo - len(s.out); v > 0 {
// v is the start position in history from end.
start := len(s.hist) - v
- if matchLen > v {
+ if ml > v {
// Some goes into current block.
// Copy remainder of history
out = append(out, s.hist[start:]...)
- matchOff -= v
- matchLen -= v
+ mo -= v
+ ml -= v
} else {
- out = append(out, s.hist[start:start+matchLen]...)
- matchLen = 0
+ out = append(out, s.hist[start:start+ml]...)
+ ml = 0
}
}
// We must be in current buffer now
- if matchLen > 0 {
- start := len(s.out) - matchOff
- if matchLen <= len(s.out)-start {
+ if ml > 0 {
+ start := len(s.out) - mo
+ if ml <= len(s.out)-start {
// No overlap
- out = append(out, s.out[start:start+matchLen]...)
+ out = append(out, s.out[start:start+ml]...)
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
- out = out[:len(out)+matchLen]
- src := out[start : start+matchLen]
+ out = out[:len(out)+ml]
+ src := out[start : start+ml]
// Destination is the space we just added.
- dst := out[len(out)-matchLen:]
+ dst := out[len(out)-ml:]
dst = dst[:len(src)]
for i := range src {
dst[i] = src[i]
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 325557bcc..e7232d5a2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -154,7 +154,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.20.1
+# github.com/containers/storage v1.20.2
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -324,7 +324,7 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.9
github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.6
+# github.com/klauspost/compress v1.10.7
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0