Diffstat (limited to 'vendor/github.com')
72 files changed, 3727 insertions, 3739 deletions
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 619e077d0..e4b38947d 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -19,9 +19,11 @@ env:
    ####
    # GCE project where images live
    IMAGE_PROJECT: "libpod-218412"
-    FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1556821664"
-    PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1541789245"
-    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190514"
+    _BUILT_IMAGE_SUFFIX: "libpod-6228273469587456"
+    FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
+    PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
+    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
+    PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"

    ####
    #### Command variables to help avoid duplication
@@ -49,11 +51,14 @@ gce_instance:
    image_name: "${FEDORA_CACHE_IMAGE_NAME}"

testing_task:
+    depends_on:
+        - lint
    gce_instance:  # Only need to specify differences from defaults (above)
        matrix:  # Duplicate this task for each matrix product.
            image_name: "${FEDORA_CACHE_IMAGE_NAME}"
            image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
            image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+            # image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"  # No fuse3 support

    # Separate scripts for separate outputs, makes debugging easier.
    setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@@ -99,6 +104,7 @@ meta_task:
            ${FEDORA_CACHE_IMAGE_NAME}
            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
            ${UBUNTU_CACHE_IMAGE_NAME}
+            ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
        GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@@ -110,7 +116,7 @@ meta_task:

vendor_task:
    container:
-        image: golang:1.12
+        image: golang:1.13
    modules_cache:
        fingerprint_script: cat go.sum
        folder: $GOPATH/pkg/mod
diff --git a/vendor/github.com/containers/storage/.travis.yml b/vendor/github.com/containers/storage/.travis.yml
index dc1c61391..a7865729d 100644
--- a/vendor/github.com/containers/storage/.travis.yml
+++ b/vendor/github.com/containers/storage/.travis.yml
@@ -15,30 +15,21 @@ env:
  - GO_VERSION="stable"
    DISTRO="ubuntu"

-  - GO_VERSION="1.11"
-    DISTRO="ubuntu"
-
-  - GO_VERSION="1.12"
+  - GO_VERSION="1.12.12"
    DISTRO="ubuntu"

  # Fedora
  - GO_VERSION="stable"
    DISTRO="fedora"

-  - GO_VERSION="1.11"
-    DISTRO="fedora"
-
-  - GO_VERSION="1.12"
+  - GO_VERSION="1.12.12"
    DISTRO="fedora"

  # CentOS
  - GO_VERSION="stable"
    DISTRO="centos"

-  - GO_VERSION="1.11"
-    DISTRO="centos"
-
-  - GO_VERSION="1.12"
+  - GO_VERSION="1.12.12"
    DISTRO="centos"

  # GO_VERSION="stable" builds successfully, but tests fail on all platforms.
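The point of the new _BUILT_IMAGE_SUFFIX variable above is that every VM cache-image name is now derived from a single build identifier, so refreshing the CI images only requires bumping one value. A minimal Go sketch of that ${VAR} substitution, reusing only the names and values from the hunk above (the expansion helper is purely illustrative, not something Cirrus or this repository ships):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Values copied from the .cirrus.yml hunk above.
	env := map[string]string{
		"_BUILT_IMAGE_SUFFIX":           "libpod-6228273469587456",
		"FEDORA_CACHE_IMAGE_NAME":       "fedora-31-${_BUILT_IMAGE_SUFFIX}",
		"PRIOR_FEDORA_CACHE_IMAGE_NAME": "fedora-30-${_BUILT_IMAGE_SUFFIX}",
		"UBUNTU_CACHE_IMAGE_NAME":       "ubuntu-19-${_BUILT_IMAGE_SUFFIX}",
		"PRIOR_UBUNTU_CACHE_IMAGE_NAME": "ubuntu-18-${_BUILT_IMAGE_SUFFIX}",
	}
	// os.Expand resolves ${NAME} references against the map, mimicking how
	// the CI configuration composes the final image names from the suffix.
	for name, raw := range env {
		expanded := os.Expand(raw, func(key string) string { return env[key] })
		fmt.Printf("%s=%s\n", name, expanded)
	}
}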
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 90e5ca499..83005307f 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -127,6 +127,9 @@ lint: install.tools help: ## this help @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor + vendor: export GO111MODULE=on \ $(GO) mod tidy && \ diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 43ded9062..850e74240 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.13.5 +1.14.0 diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index d614b78fc..e52545d5b 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -155,7 +155,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { switch mode := f.Mode(); { case mode.IsRegular(): - id := fileID{dev: stat.Dev, ino: stat.Ino} + id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} if copyMode == Hardlink { isHardlink = true if err2 := os.Link(srcPath, dstPath); err2 != nil { diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index a45f6b44c..dddf8a8b4 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -48,6 +48,8 @@ const ( FsMagicZfs = FsMagic(0x2fc12fc1) // FsMagicOverlay filesystem id for overlay FsMagicOverlay = FsMagic(0x794C7630) + // FsMagicFUSE filesystem id for FUSE + FsMagicFUSE = FsMagic(0x65735546) ) var ( diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 97222fe7a..13cd65fa5 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -231,13 +231,18 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } + fileSystemType := graphdriver.FsMagicOverlay + if opts.mountProgram != "" { + fileSystemType = graphdriver.FsMagicFUSE + } + d := &Driver{ name: "overlay", home: home, runhome: runhome, uidMaps: options.UIDMaps, gidMaps: options.GIDMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), supportsDType: supportsDType, usingMetacopy: usingMetacopy, locker: locker.New(), @@ -1016,8 +1021,39 @@ func (d *Driver) Put(id string) error { if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { return err } - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { - logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + + unmounted := false + + if d.options.mountProgram != "" { + // Attempt to unmount the FUSE mount using either fusermount or fusermount3. 
+ // If they fail, fallback to unix.Unmount + for _, v := range []string{"fusermount3", "fusermount"} { + err := exec.Command(v, "-u", mountpoint).Run() + if err != nil && !os.IsNotExist(err) { + logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) + } + if err == nil { + unmounted = true + break + } + } + // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all + // pending changes are propagated to the file system + if !unmounted { + fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0) + if err == nil { + if err := unix.Syncfs(fd); err != nil { + logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) + } + unix.Close(fd) + } + } + } + + if !unmounted { + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index 934e82ad2..f9d7a199c 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -3,22 +3,22 @@ module github.com/containers/storage require ( github.com/BurntSushi/toml v0.3.1 github.com/DataDog/zstd v1.4.0 // indirect - github.com/Microsoft/go-winio v0.4.12 + github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/hcsshim v0.8.6 - github.com/docker/docker v0.0.0-20171019062838-86f080cff091 + github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect github.com/docker/go-units v0.4.0 - github.com/klauspost/compress v1.7.2 + github.com/klauspost/compress v1.9.2 github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/pgzip v1.2.1 - github.com/mattn/go-shellwords v1.0.5 + github.com/mattn/go-shellwords v1.0.6 github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/opencontainers/go-digest v1.0.0-rc1 - github.com/opencontainers/runc v1.0.0-rc8 - github.com/opencontainers/selinux v1.2.2 + github.com/opencontainers/runc v1.0.0-rc9 + github.com/opencontainers/selinux v1.3.0 github.com/pkg/errors v0.8.1 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.3.0 + github.com/stretchr/testify v1.4.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/tchap/go-patricia v2.3.0+incompatible github.com/vbatts/tar-split v0.11.1 @@ -26,3 +26,5 @@ require ( golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb gotest.tools v0.0.0-20190624233834-05ebafbffc79 ) + +go 1.13 diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index a0e05dd1d..6a81628bf 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -4,8 +4,20 @@ github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo= github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod 
h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM= +github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 h1:WMpHmC6AxwWb9hMqhudkqG7A/p14KiMnl6d3r1iUMjU= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -13,10 +25,18 @@ github.com/docker/docker v0.0.0-20171019062838-86f080cff091 h1:QpxpTw4MJeOzbC7X0 github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4= +github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y= github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.1 h1:TWy0o9J9c6LK9C8t7Msh6IAJNXbsU/nvKLTQUU5HdaY= +github.com/klauspost/compress v1.9.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY= +github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= @@ -25,16 +45,26 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod 
h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0= github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg= github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= +github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g= +github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -42,28 +72,49 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I= +github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v0.0.0-20190624233834-05ebafbffc79 h1:C+K4iPg1rIvmCf4JjelkbWv2jeWevEwp05Lz8XfTYgE= gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= diff --git 
a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 20f017e64..ba635ae91 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -821,11 +821,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = pm.Matches(relFilePath) + matches, err := pm.IsMatch(relFilePath) if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } + skip = matches } if skip { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 5602c7e21..3a47eceae 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -61,10 +61,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi } if statErr == nil { if stat.Mode()&os.ModeCharDevice != 0 { - // It's a whiteout for this directory, so it can't have been - // both deleted and recreated in the layer we're diffing. - s := stat.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if isWhiteOut(stat) { return nil, nil } } @@ -98,8 +95,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi // If it's whiteout for a parent directory, then the // original directory wasn't inherited into this layer, // so we don't need to emit whiteout for it. - s := stat.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if isWhiteOut(stat) { return nil, nil } } @@ -141,3 +137,8 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, return true, nil } + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index b12309361..dc313d1ab 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -307,9 +307,7 @@ func overlayLowerContainsWhiteout(root, path string) (bool, error) { return false, err } if err == nil && stat.Mode()&os.ModeCharDevice != 0 { - // Check if there's whiteout for the specified item in the specified layer. - s := stat.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if isWhiteOut(stat) { return true, nil } } @@ -319,8 +317,7 @@ func overlayLowerContainsWhiteout(root, path string) (bool, error) { func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { // If it's a whiteout item, then a file or directory with that name is removed by this layer. if fi.Mode()&os.ModeCharDevice != 0 { - s := fi.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if isWhiteOut(fi) { return path, nil } } @@ -350,10 +347,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str } if err == nil { if stat.Mode()&os.ModeCharDevice != 0 { - // It's a whiteout for this directory, so it can't have been - // deleted in this layer. 
- s := stat.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if isWhiteOut(stat) { return "", nil } } @@ -370,10 +364,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str } if err == nil { if stat.Mode()&os.ModeCharDevice != 0 { - // If it's whiteout for a parent directory, then the - // original directory wasn't inherited into the top layer. - s := stat.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if isWhiteOut(stat) { return "", nil } } diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 091040140..18a65bc0a 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -1,5 +1,9 @@ package config +import ( + "fmt" +) + // ThinpoolOptionsConfig represents the "storage.options.thinpool" // TOML config table. type ThinpoolOptionsConfig struct { @@ -47,6 +51,9 @@ type ThinpoolOptionsConfig struct { // devices. MountOpt string `toml:"mountopt"` + // Size + Size string `toml:"size"` + // UseDeferredDeletion marks device for deferred deletion UseDeferredDeletion string `toml:"use_deferred_deletion"` @@ -59,6 +66,47 @@ type ThinpoolOptionsConfig struct { XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"` } +type AufsOptionsConfig struct { + // MountOpt specifies extra mount options used when mounting + MountOpt string `toml:"mountopt"` +} + +type BtrfsOptionsConfig struct { + // MinSpace is the minimal spaces allocated to the device + MinSpace string `toml:"min_space"` + // Size + Size string `toml:"size"` +} + +type OverlayOptionsConfig struct { + // IgnoreChownErrors is a flag for whether chown errors should be + // ignored when building an image. + IgnoreChownErrors string `toml:"ignore_chown_errors"` + // MountOpt specifies extra mount options used when mounting + MountOpt string `toml:"mountopt"` + // Alternative program to use for the mount of the file system + MountProgram string `toml:"mount_program"` + // Size + Size string `toml:"size"` + // Do not create a bind mount on the storage home + SkipMountHome string `toml:"skip_mount_home"` +} + +type VfsOptionsConfig struct { + // IgnoreChownErrors is a flag for whether chown errors should be + // ignored when building an image. + IgnoreChownErrors string `toml:"ignore_chown_errors"` +} + +type ZfsOptionsConfig struct { + // MountOpt specifies extra mount options used when mounting + MountOpt string `toml:"mountopt"` + // Name is the File System name of the ZFS File system + Name string `toml:"fsname"` + // Size + Size string `toml:"size"` +} + // OptionsConfig represents the "storage.options" TOML config table. type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only @@ -83,12 +131,158 @@ type OptionsConfig struct { // RemapGroup is the name of one or more entries in /etc/subgid which // should be used to set up default GID mappings. 
RemapGroup string `toml:"remap-group"` + + // Aufs container options to be handed to aufs drivers + Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + + // Btrfs container options to be handed to btrfs drivers + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + // Thinpool container options to be handed to thinpool drivers Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + // Overlay container options to be handed to overlay drivers + Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + + // Vfs container options to be handed to VFS drivers + Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + + // Zfs container options to be handed to ZFS drivers + Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + + // Do not create a bind mount on the storage home + SkipMountHome string `toml:"skip_mount_home"` + // Alternative program to use for the mount of the file system MountProgram string `toml:"mount_program"` // MountOpt specifies extra mount options used when mounting MountOpt string `toml:"mountopt"` } + +// GetGraphDriverOptions returns the driver specific options +func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { + var doptions []string + switch driverName { + case "aufs": + if options.Aufs.MountOpt != "" { + return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + case "btrfs": + if options.Btrfs.MinSpace != "" { + return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace)) + } + if options.Btrfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + case "devicemapper": + if options.Thinpool.AutoExtendPercent != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent)) + } + if options.Thinpool.AutoExtendThreshold != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold)) + } + if options.Thinpool.BaseSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize)) + } + if options.Thinpool.BlockSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize)) + } + if options.Thinpool.DirectLvmDevice != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice)) + } + if options.Thinpool.DirectLvmDeviceForce != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce)) + } + if options.Thinpool.Fs != "" { + doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs)) + } + if options.Thinpool.LogLevel != "" { + doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) + } + if options.Thinpool.MinFreeSpace != "" { + doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) + } + if options.Thinpool.MkfsArg != "" { + doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg)) + } + if options.Thinpool.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, 
fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + if options.Thinpool.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + if options.Thinpool.UseDeferredDeletion != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion)) + } + if options.Thinpool.UseDeferredRemoval != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval)) + } + if options.Thinpool.XfsNoSpaceMaxRetries != "" { + doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries)) + } + + case "overlay": + if options.Overlay.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + if options.Overlay.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram)) + } else if options.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram)) + } + if options.Overlay.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Overlay.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + if options.Overlay.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) + } else if options.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome)) + } + + case "vfs": + if options.Vfs.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + + case "zfs": + if options.Zfs.Name != "" { + doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name)) + } + if options.Zfs.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Zfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + } + return doptions +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index a129e654e..388eaf9d5 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ 
b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -57,6 +57,7 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { return pm, nil } +// Deprecated: Please use the `MatchesResult` method instead. // Matches matches path against all the patterns. Matches is not safe to be // called concurrently func (pm *PatternMatcher) Matches(file string) (bool, error) { @@ -96,6 +97,85 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) { return matched, nil } +type MatchResult struct { + isMatched bool + matches, excludes uint +} + +// Excludes returns true if the overall result is matched +func (m *MatchResult) IsMatched() bool { + return m.isMatched +} + +// Excludes returns the amount of matches of an MatchResult +func (m *MatchResult) Matches() uint { + return m.matches +} + +// Excludes returns the amount of excludes of an MatchResult +func (m *MatchResult) Excludes() uint { + return m.excludes +} + +// MatchesResult verifies the provided filepath against all patterns. +// It returns the `*MatchResult` result for the patterns on success, otherwise +// an error. This method is not safe to be called concurrently. +func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) { + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + res = &MatchResult{false, 0, 0} + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return nil, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join( + parentPathDirs[:len(pattern.dirs)], + string(os.PathSeparator)), + ) + } + } + + if match { + res.isMatched = !negative + if negative { + res.excludes++ + } else { + res.matches++ + } + } + } + + if res.matches > 0 { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return res, nil +} + +// IsMatch verifies the provided filepath against all patterns and returns true +// if it matches. A match is valid if the last match is a positive one. +// It returns an error on failure and is not safe to be called concurrently. 
+func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) { + res, err := pm.MatchesResult(file) + if err != nil { + return false, err + } + return res.isMatched, nil +} + // Exclusions returns true if any of the patterns define exclusions func (pm *PatternMatcher) Exclusions() bool { return pm.exclusions @@ -228,7 +308,7 @@ func Matches(file string, patterns []string) (bool, error) { return false, nil } - return pm.Matches(file) + return pm.IsMatch(file) } // CopyFile copies from src to dst until either EOF is reached diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index e59867277..3d3829236 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -63,7 +63,7 @@ func GetKernelVersion() (*VersionInfo, error) { } KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.minor = int((dwVersion & 0xFF00) >> 8) KVI.build = int((dwVersion & 0xFFFF0000) >> 16) return KVI, nil diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index 1939f9518..af7af20fa 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -8,7 +8,7 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: s.Rdev, + rdev: uint64(s.Rdev), mtim: s.Mtim}, nil } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index efd46eefb..db6d35768 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -21,25 +21,6 @@ graphroot = "/var/lib/containers/storage" additionalimagestores = [ ] -# Size is used to set a maximum size of the container image. Only supported by -# certain container storage drivers. -size = "" - -# Path to an helper program to use for mounting the file system instead of mounting it -# directly. -#mount_program = "/usr/bin/fuse-overlayfs" - -# mountopt specifies comma separated list of extra mount options -mountopt = "nodev" - -# ignore_chown_errors can be set to allow a non privileged user running with -# a single UID within a user namespace to run containers. The user can pull -# and use any image even those with multiple uids. Note multiple UIDs will be -# squasheddown to the default uid in the container. These images will have no -# separation between the users in the container. Only supported for the overlay -# and vfs drivers. -#ignore_chown_errors = false - # Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of # a container, to the UIDs/GIDs as they should appear outside of the container, # and the length of the range of UIDs/GIDs. Additional mapped sets can be @@ -61,6 +42,28 @@ mountopt = "nodev" # remap-user = "storage" # remap-group = "storage" +[storage.options.overlay] +# ignore_chown_errors can be set to allow a non privileged user running with +# a single UID within a user namespace to run containers. The user can pull +# and use any image even those with multiple uids. Note multiple UIDs will be +# squashed down to the default uid in the container. These images will have no +# separation between the users in the container. 
Only supported for the overlay +# and vfs drivers. +#ignore_chown_errors = false + +# Path to an helper program to use for mounting the file system instead of mounting it +# directly. +#mount_program = "/usr/bin/fuse-overlayfs" + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + +# Set to skip a PRIVATE bind mount on the storage home directory. +skip_mount_home = "false" + +# Size is used to set a maximum size of the container image. +# size = "" + [storage.options.thinpool] # Storage Options for thinpool @@ -111,6 +114,9 @@ mountopt = "nodev" # device. # mkfsarg = "" +# Size is used to set a maximum size of the container image. +# size = "" + # use_deferred_removal marks devicemapper block device for deferred removal. # If the thinpool is in use when the driver attempts to remove it, the driver # tells the kernel to remove it as soon as possible. Note this does not free diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 6e4bd4ee0..654e86880 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -18,7 +18,7 @@ import ( "github.com/BurntSushi/toml" drivers "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/config" + cfg "github.com/containers/storage/pkg/config" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" @@ -3274,10 +3274,10 @@ func DefaultConfigFile(rootless bool) (string, error) { // TOML-friendly explicit tables used for conversions. type tomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - Options struct{ config.OptionsConfig } `toml:"options"` + Driver string `toml:"driver"` + RunRoot string `toml:"runroot"` + GraphRoot string `toml:"graphroot"` + Options cfg.OptionsConfig `toml:"options"` } `toml:"storage"` } @@ -3307,50 +3307,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if config.Storage.GraphRoot != "" { storeOptions.GraphRoot = config.Storage.GraphRoot } - if config.Storage.Options.Thinpool.AutoExtendPercent != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent)) - } - - if config.Storage.Options.Thinpool.AutoExtendThreshold != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold)) - } - - if config.Storage.Options.Thinpool.BaseSize != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize)) - } - if config.Storage.Options.Thinpool.BlockSize != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize)) - } - if config.Storage.Options.Thinpool.DirectLvmDevice != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice)) - } - if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, 
fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce)) - } - if config.Storage.Options.Thinpool.Fs != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs)) - } - if config.Storage.Options.Thinpool.LogLevel != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel)) - } - if config.Storage.Options.Thinpool.MinFreeSpace != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace)) - } - if config.Storage.Options.Thinpool.MkfsArg != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg)) - } - if config.Storage.Options.Thinpool.MountOpt != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.Thinpool.MountOpt)) - } - if config.Storage.Options.Thinpool.UseDeferredDeletion != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion)) - } - if config.Storage.Options.Thinpool.UseDeferredRemoval != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval)) - } - if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" { - storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries)) - } for _, s := range config.Storage.Options.AdditionalImageStores { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) } @@ -3397,6 +3353,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if os.Getenv("STORAGE_DRIVER") != "" { storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") } + + storeOptions.GraphDriverOptions = cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options) + if os.Getenv("STORAGE_OPTS") != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) } diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go deleted file mode 100644 index 8298d309a..000000000 --- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go +++ /dev/null @@ -1,42 +0,0 @@ -//+build !noasm -//+build !appengine -//+build !gccgo - -// Copyright 2015, Klaus Post, see LICENSE for details. - -package flate - -import ( - "github.com/klauspost/cpuid" -) - -// crc32sse returns a hash for the first 4 bytes of the slice -// len(a) must be >= 4. -//go:noescape -func crc32sse(a []byte) uint32 - -// crc32sseAll calculates hashes for each 4-byte set in a. -// dst must be east len(a) - 4 in size. -// The size is not checked by the assembly. -//go:noescape -func crc32sseAll(a []byte, dst []uint32) - -// matchLenSSE4 returns the number of matching bytes in a and b -// up to length 'max'. Both slices must be at least 'max' -// bytes in size. 
-// -// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions. -// -//go:noescape -func matchLenSSE4(a, b []byte, max int) int - -// histogram accumulates a histogram of b in h. -// h must be at least 256 entries in length, -// and must be cleared before calling this function. -//go:noescape -func histogram(b []byte, h []int32) - -// Detect SSE 4.2 feature. -func init() { - useSSE42 = cpuid.CPU.SSE42() -} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s deleted file mode 100644 index a79943727..000000000 --- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s +++ /dev/null @@ -1,214 +0,0 @@ -//+build !noasm -//+build !appengine -//+build !gccgo - -// Copyright 2015, Klaus Post, see LICENSE for details. - -// func crc32sse(a []byte) uint32 -TEXT ·crc32sse(SB), 4, $0 - MOVQ a+0(FP), R10 - XORQ BX, BX - - // CRC32 dword (R10), EBX - BYTE $0xF2; BYTE $0x41; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0x1a - - MOVL BX, ret+24(FP) - RET - -// func crc32sseAll(a []byte, dst []uint32) -TEXT ·crc32sseAll(SB), 4, $0 - MOVQ a+0(FP), R8 // R8: src - MOVQ a_len+8(FP), R10 // input length - MOVQ dst+24(FP), R9 // R9: dst - SUBQ $4, R10 - JS end - JZ one_crc - MOVQ R10, R13 - SHRQ $2, R10 // len/4 - ANDQ $3, R13 // len&3 - XORQ BX, BX - ADDQ $1, R13 - TESTQ R10, R10 - JZ rem_loop - -crc_loop: - MOVQ (R8), R11 - XORQ BX, BX - XORQ DX, DX - XORQ DI, DI - MOVQ R11, R12 - SHRQ $8, R11 - MOVQ R12, AX - MOVQ R11, CX - SHRQ $16, R12 - SHRQ $16, R11 - MOVQ R12, SI - - // CRC32 EAX, EBX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd8 - - // CRC32 ECX, EDX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd1 - - // CRC32 ESI, EDI - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xfe - MOVL BX, (R9) - MOVL DX, 4(R9) - MOVL DI, 8(R9) - - XORQ BX, BX - MOVL R11, AX - - // CRC32 EAX, EBX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd8 - MOVL BX, 12(R9) - - ADDQ $16, R9 - ADDQ $4, R8 - XORQ BX, BX - SUBQ $1, R10 - JNZ crc_loop - -rem_loop: - MOVL (R8), AX - - // CRC32 EAX, EBX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd8 - - MOVL BX, (R9) - ADDQ $4, R9 - ADDQ $1, R8 - XORQ BX, BX - SUBQ $1, R13 - JNZ rem_loop - -end: - RET - -one_crc: - MOVQ $1, R13 - XORQ BX, BX - JMP rem_loop - -// func matchLenSSE4(a, b []byte, max int) int -TEXT ·matchLenSSE4(SB), 4, $0 - MOVQ a_base+0(FP), SI - MOVQ b_base+24(FP), DI - MOVQ DI, DX - MOVQ max+48(FP), CX - -cmp8: - // As long as we are 8 or more bytes before the end of max, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ CX, $8 - JLT cmp1 - MOVQ (SI), AX - MOVQ (DI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, SI - ADDQ $8, DI - SUBQ $8, CX - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, DI - - // Subtract off &b[0] to convert from &b[ret] to ret, and return. - SUBQ DX, DI - MOVQ DI, ret+56(FP) - RET - -cmp1: - // In the slices' tail, compare 1 byte at a time. 
- CMPQ CX, $0 - JEQ matchLenEnd - MOVB (SI), AX - MOVB (DI), BX - CMPB AX, BX - JNE matchLenEnd - ADDQ $1, SI - ADDQ $1, DI - SUBQ $1, CX - JMP cmp1 - -matchLenEnd: - // Subtract off &b[0] to convert from &b[ret] to ret, and return. - SUBQ DX, DI - MOVQ DI, ret+56(FP) - RET - -// func histogram(b []byte, h []int32) -TEXT ·histogram(SB), 4, $0 - MOVQ b+0(FP), SI // SI: &b - MOVQ b_len+8(FP), R9 // R9: len(b) - MOVQ h+24(FP), DI // DI: Histogram - MOVQ R9, R8 - SHRQ $3, R8 - JZ hist1 - XORQ R11, R11 - -loop_hist8: - MOVQ (SI), R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - INCL (DI)(R10*4) - - ADDQ $8, SI - DECQ R8 - JNZ loop_hist8 - -hist1: - ANDQ $7, R9 - JZ end_hist - XORQ R10, R10 - -loop_hist1: - MOVB (SI), R10 - INCL (DI)(R10*4) - INCQ SI - DECQ R9 - JNZ loop_hist1 - -end_hist: - RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go deleted file mode 100644 index dcf43bd50..000000000 --- a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go +++ /dev/null @@ -1,35 +0,0 @@ -//+build !amd64 noasm appengine gccgo - -// Copyright 2015, Klaus Post, see LICENSE for details. - -package flate - -func init() { - useSSE42 = false -} - -// crc32sse should never be called. -func crc32sse(a []byte) uint32 { - panic("no assembler") -} - -// crc32sseAll should never be called. -func crc32sseAll(a []byte, dst []uint32) { - panic("no assembler") -} - -// matchLenSSE4 should never be called. -func matchLenSSE4(a, b []byte, max int) int { - panic("no assembler") - return 0 -} - -// histogram accumulates a histogram of b in h. -// -// len(h) must be >= 256, and h's elements must be all zeroes. 
-func histogram(b []byte, h []int32) { - h = h[:256] - for _, t := range b { - h[t]++ - } -} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 628795120..20c94f596 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -50,8 +50,6 @@ const ( skipNever = math.MaxInt32 ) -var useSSE42 bool - type compressionLevel struct { good, lazy, nice, chain, fastSkipHashing, level int } @@ -97,9 +95,8 @@ type advancedState struct { hashOffset int // input window: unprocessed data is window[index:windowEnd] - index int - bulkHasher func([]byte, []uint32) - hashMatch [maxMatchLength + minMatchLength]uint32 + index int + hashMatch [maxMatchLength + minMatchLength]uint32 } type compressor struct { @@ -120,7 +117,7 @@ type compressor struct { // queued output tokens tokens tokens - snap fastEnc + fast fastEnc state *advancedState } @@ -164,14 +161,14 @@ func (d *compressor) fillDeflate(b []byte) int { return n } -func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { if index > 0 || eof { var window []byte if d.blockStart <= index { window = d.window[d.blockStart:index] } d.blockStart = index - d.w.writeBlock(tok.tokens[:tok.n], eof, window) + d.w.writeBlock(tok, eof, window) return d.w.err } return nil @@ -180,20 +177,20 @@ func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { // writeBlockSkip writes the current block and uses the number of tokens // to determine if the block should be stored on no matches, or // only huffman encoded. -func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { if index > 0 || eof { if d.blockStart <= index { window := d.window[d.blockStart:index] // If we removed less than a 64th of all literals // we huffman compress the block. if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window) + d.w.writeBlockHuff(eof, window, d.sync) } else { // Write a dynamic huffman block. - d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) } } else { - d.w.writeBlock(tok.tokens[:tok.n], eof, nil) + d.w.writeBlock(tok, eof, nil) } d.blockStart = index return d.w.err @@ -208,8 +205,16 @@ func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only mode, // use constant or Snappy compression. 
- switch d.compressionLevel.level { - case 0, 1, 2: + if d.level == 0 { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() return } s := d.state @@ -236,7 +241,7 @@ func (d *compressor) fillWindow(b []byte) { } dst := s.hashMatch[:dstSize] - s.bulkHasher(tocheck, dst) + bulkHash4(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex @@ -284,62 +289,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { - n := matchLen(win[i:], wPos, minMatchLook) - - if n > length && (n > minMatchLength || pos-i <= 4096) { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i == minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex || i < 0 { - break - } - } - return -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. -// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. - tries := d.chain - length = prevLength - if length >= d.good { - tries >>= 2 - } - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLenSSE4(win[i:], wPos, minMatchLook) + n := matchLen(win[i:i+minMatchLook], wPos) if n > length && (n > minMatchLength || pos-i <= 4096) { length = n @@ -372,42 +322,27 @@ func (d *compressor) writeStoredBlock(buf []byte) error { return d.w.err } -const hashmul = 0x1e35a7bd - // hash4 returns a hash representation of the first 4 bytes // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) + b = b[:4] + return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits) } // bulkHash4 will compute hashes using the same // algorithm as hash4 func bulkHash4(b []byte, dst []uint32) { - if len(b) < minMatchLength { + if len(b) < 4 { return } hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 - dst[0] = (hb * hashmul) >> (32 - hashBits) - end := len(b) - minMatchLength + 1 + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 for i := 1; i < end; i++ { hb = (hb << 8) | uint32(b[i+3]) - dst[i] = (hb * hashmul) >> (32 - hashBits) - } -} - -// matchLen returns the number of matching bytes in a and b -// up to length 'max'. Both slices must be at least 'max' -// bytes in size. 
-func matchLen(a, b []byte, max int) int { - a = a[:max] - b = b[:len(a)] - for i, av := range a { - if b[i] != av { - return i - } + dst[i] = hash4u(hb, hashBits) } - return max } func (d *compressor) initDeflate() { @@ -424,149 +359,6 @@ func (d *compressor) initDeflate() { s.offset = 0 s.hash = 0 s.chainHead = -1 - s.bulkHasher = bulkHash4 - if useSSE42 { - s.bulkHasher = crc32sseAll - } -} - -// Assumes that d.fastSkipHashing != skipNever, -// otherwise use deflateLazy -func (d *compressor) deflate() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = false - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - } - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - if d.tokens.n > 0 { - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - ch := s.hashHead[s.hash&hashMask] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) - } - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - if s.length >= minMatchLength { - s.ii = 0 - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3 - d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize)) - d.tokens.n++ - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - if s.length <= d.fastSkipHashing { - var newIndex int - newIndex = s.index + s.length - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. 
- s.hashHead[newH] = uint32(di + s.hashOffset) - } - s.hash = newH - } - s.index = newIndex - } else { - // For matches this long, we don't bother inserting each individual - // item into the table. - s.index += s.length - if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - } - } - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } else { - s.ii++ - end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1 - if end > d.windowEnd { - end = d.windowEnd - } - for i := s.index; i < end; i++ { - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } - s.index = end - } - } } // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, @@ -603,15 +395,14 @@ func (d *compressor) deflateLazy() { // Flush current output block if any. if d.byteAvailable { // There is still one pending token that needs to be flushed - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) d.byteAvailable = false } if d.tokens.n > 0 { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } return } @@ -642,8 +433,7 @@ func (d *compressor) deflateLazy() { if prevLength >= minMatchLength && s.length <= prevLength { // There was a match at the previous step, and the current match is // not better. Output the previous match. - d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - d.tokens.n++ + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. If there is not enough @@ -684,10 +474,10 @@ func (d *compressor) deflateLazy() { s.length = minMatchLength - 1 if d.tokens.n == maxFlateBlockTokens { // The block includes the current character - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } } else { // Reset, if we got a match this run. @@ -697,13 +487,12 @@ func (d *compressor) deflateLazy() { // We have a byte waiting. Emit it. 
if d.byteAvailable { s.ii++ - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } s.index++ @@ -716,343 +505,24 @@ func (d *compressor) deflateLazy() { break } - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } s.index++ } // Flush last byte - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) d.byteAvailable = false // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 - } - } - } else { - s.index++ - d.byteAvailable = true - } - } - } -} - -// Assumes that d.fastSkipHashing != skipNever, -// otherwise use deflateLazySSE -func (d *compressor) deflateSSE() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = false - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - } - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - if d.tokens.n > 0 { - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - ch := s.hashHead[s.hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash] = uint32(s.index + s.hashOffset) - } - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 { - if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - if s.length >= minMatchLength { - s.ii = 0 - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3 - d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize)) - d.tokens.n++ - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. 
If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - if s.length <= d.fastSkipHashing { - var newIndex int - newIndex = s.index + s.length - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - - crc32sseAll(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - s.hash = newH - } - s.index = newIndex - } else { - // For matches this long, we don't bother inserting each individual - // item into the table. - s.index += s.length - if s.index < s.maxInsertIndex { - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - } - } - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } else { - s.ii++ - end := s.index + int(s.ii>>5) + 1 - if end > d.windowEnd { - end = d.windowEnd - } - for i := s.index; i < end; i++ { - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } - s.index = end - } - } -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazySSE() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = false - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - } - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. 
- if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - ch := s.hashHead[s.hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - if prevLength >= minMatchLength && s.length <= prevLength { - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - d.tokens.n++ - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - var newIndex int - newIndex = s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - crc32sseAll(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - s.hash = newH - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. 
- if s.ii > 31 { - n := int(s.ii >> 6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - s.index++ - } - // Flush last byte - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 + d.tokens.Reset() } } } else { @@ -1085,17 +555,17 @@ func (d *compressor) storeHuff() { if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { return } - d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) d.err = d.w.err d.windowEnd = 0 } -// storeHuff will compress and store the currently added data, +// storeFast will compress and store the currently added data, // if enough has been accumulated or we at the end of the stream. // Any error that occurred will be in d.err -func (d *compressor) storeSnappy() { +func (d *compressor) storeFast() { // We only compress if we have maxStoreBlockSize. - if d.windowEnd < maxStoreBlockSize { + if d.windowEnd < len(d.window) { if !d.sync { return } @@ -1106,32 +576,30 @@ func (d *compressor) storeSnappy() { } if d.windowEnd <= 32 { d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - d.tokens.n = 0 - d.windowEnd = 0 } else { - d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) d.err = d.w.err } - d.tokens.n = 0 + d.tokens.Reset() d.windowEnd = 0 - d.snap.Reset() + d.fast.Reset() return } } - d.snap.Encode(&d.tokens, d.window[:d.windowEnd]) + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) // If we made zero matches, store the block as is. - if int(d.tokens.n) == d.windowEnd { + if d.tokens.n == 0 { d.err = d.writeStoredBlock(d.window[:d.windowEnd]) // If we removed less than 1/16th, huffman compress the block. 
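// (Illustrative numbers, not part of the patch:) with d.windowEnd = 64000 the
// threshold below is 64000 - (64000>>4) = 60000 tokens. A block that still needs
// 61,000 tokens kept less than 1/16th of its bytes as matches and is written with
// writeBlockHuff; one that needs 50,000 tokens goes through writeBlockDynamic.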
} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { - d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) d.err = d.w.err } else { - d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd]) + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) d.err = d.w.err } - d.tokens.n = 0 + d.tokens.Reset() d.windowEnd = 0 } @@ -1176,36 +644,26 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.fill = (*compressor).fillBlock d.step = (*compressor).store case level == ConstantCompression: + d.w.logReusePenalty = uint(4) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeHuff - case level >= 1 && level <= 4: - d.snap = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeSnappy case level == DefaultCompression: level = 5 fallthrough - case 5 <= level && level <= 9: + case level >= 1 && level <= 6: + d.w.logReusePenalty = uint(level + 1) + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logReusePenalty = uint(level) d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() d.fill = (*compressor).fillDeflate - if d.fastSkipHashing == skipNever { - if useSSE42 { - d.step = (*compressor).deflateLazySSE - } else { - d.step = (*compressor).deflateLazy - } - } else { - if useSSE42 { - d.step = (*compressor).deflateSSE - } else { - d.step = (*compressor).deflate - - } - } + d.step = (*compressor).deflateLazy default: return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) } @@ -1218,10 +676,10 @@ func (d *compressor) reset(w io.Writer) { d.sync = false d.err = nil // We only need to reset a few things for Snappy. - if d.snap != nil { - d.snap.Reset() + if d.fast != nil { + d.fast.Reset() d.windowEnd = 0 - d.tokens.n = 0 + d.tokens.Reset() return } switch d.compressionLevel.chain { @@ -1240,7 +698,7 @@ func (d *compressor) reset(w io.Writer) { s.hashOffset = 1 s.index, d.windowEnd = 0, 0 d.blockStart, d.byteAvailable = 0, false - d.tokens.n = 0 + d.tokens.Reset() s.length = minMatchLength - 1 s.offset = 0 s.hash = 0 diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 000000000..b0a470f92 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,257 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
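A minimal usage sketch (an editor's illustration, not part of this patch), assuming the package's public API, which mirrors compress/flate: after the init() change above, levels 1-6 are handled by the table-based fast encoders via storeFast, while levels 7-9 use the deflateLazy matcher.

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// Level 6 is the last of the "fast" levels; 7-9 switch to the lazy matcher.
	zw, err := flate.NewWriter(&buf, 6)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(bytes.Repeat([]byte("hello flate "), 100)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed 1200 input bytes into %d", buf.Len())
}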
+ +package flate + +import ( + "fmt" + "math/bits" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 16 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 18 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxMatchOffset * 10 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load32(b []byte, i int) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +type tableEntry struct { + val uint32 + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) 
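// (Illustrative sizes, editor's aside:) maxMatchOffset is 1<<15 = 32 KiB, so
// allocHistory preallocates 10*32 KiB = 320 KiB of history here, and bufferReset
// keeps e.cur safely below the int32 wrap-around point. The level-1 table
// (tableSize = 1<<16 entries of 8 bytes each) is roughly 512 KiB; the bigger table
// used by level 2 (bTableSize = 1<<18) is roughly 2 MiB.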
+ return s +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> ((32 - h) & 31) +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4x64(u uint64, h uint8) uint32 { + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastGen) Reset() { + if cap(e.hist) < int(maxMatchOffset*8) { + l := maxMatchOffset * 8 + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } + // We offset current position so everything will be out of reach + e.cur += maxMatchOffset + int32(len(e.hist)) + e.hist = e.hist[:0] +} + +// matchLen returns the maximum length. +// 'a' must be the shortest of the two. +func matchLen(a, b []byte) int { + b = b[:len(a)] + var checked int + if len(a) > 4 { + // Try 4 bytes first + if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { + return bits.TrailingZeros32(diff) >> 3 + } + // Switch to 8 byte matching. 
+ checked = 4 + a = a[4:] + b = b[4:] + for len(a) >= 8 { + b = b[:len(a)] + if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return int(i) + checked + } + } + return len(a) + checked +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index f46c65418..5ed476aa0 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -85,26 +85,48 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. bits uint64 - nbits uint - bytes [256]byte - codegenFreq [codegenCodeCount]int32 + nbits uint16 nbytes uint8 - literalFreq []int32 - offsetFreq []int32 - codegen []uint8 literalEncoding *huffmanEncoder offsetEncoding *huffmanEncoder codegenEncoding *huffmanEncoder err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logReusePenalty uint + lastHuffMan bool + bytes [256]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 } +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. +// This also indicates that a Huffman table has been generated that can output all +// possible symbols. +// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated +// an EOB with the previous table must be written. +// +// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. +// +// An incoming block estimates the output size of a new table using a 'fresh' by calculating the +// optimal size and adding a penalty in 'logReusePenalty'. +// A Huffman table is not optimal, which is why we add a penalty, and generating a new table +// is slower both for compression and decompression. 
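The decision described above can be condensed into a small helper. This is an editor's sketch, not part of the patch: newTableBits stands in for w.lastHeader plus tokens.EstimatedBits(), and reuseBits for w.dynamicSize() evaluated with the previously emitted tables.

// decideReuse sketches the reuse heuristic described in the comment above.
// Hypothetical helper, for illustration only.
func decideReuse(newTableBits, reuseBits int, logReusePenalty uint) (emitNewTable bool) {
	// The fresh-table estimate is optimistic (it assumes an optimal code) and a
	// new header costs time on both sides, so inflate it before comparing.
	newTableBits += newTableBits >> logReusePenalty
	return newTableBits < reuseBits
}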
+ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { return &huffmanBitWriter{ writer: w, - literalFreq: make([]int32, lengthCodesStart+32), - offsetFreq: make([]int32, 32), - codegen: make([]uint8, maxNumLit+offsetCodeCount+1), - literalEncoding: newHuffmanEncoder(maxNumLit), + literalEncoding: newHuffmanEncoder(literalCount), codegenEncoding: newHuffmanEncoder(codegenCodeCount), offsetEncoding: newHuffmanEncoder(offsetCodeCount), } @@ -114,6 +136,41 @@ func (w *huffmanBitWriter) reset(writer io.Writer) { w.writer = writer w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil w.bytes = [256]byte{} + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) { + offsets, lits = true, true + a := t.offHist[:offsetCodeCount] + b := w.offsetFreq[:len(a)] + for i := range a { + if b[i] == 0 && a[i] != 0 { + offsets = false + break + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalFreq[256:literalCount] + b = b[:len(a)] + for i := range a { + if b[i] == 0 && a[i] != 0 { + lits = false + break + } + } + if lits { + a = t.litHist[:] + b = w.literalFreq[:len(a)] + for i := range a { + if b[i] == 0 && a[i] != 0 { + lits = false + break + } + } + } + return } func (w *huffmanBitWriter) flush() { @@ -144,30 +201,11 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint) { - w.bits |= uint64(b) << w.nbits +func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { + w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) - n += 6 - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - w.nbytes = n + w.writeOutBits() } } @@ -213,7 +251,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE // a copy of the frequencies, and as the place where we put the result. // This is fine because the output is always shorter than the input used // so far. - codegen := w.codegen // cache + codegen := w.codegen[:] // cache // Copy the concatenated code sizes to codegen. Put a marker at the end. cgnl := codegen[:numLiterals] for i := range cgnl { @@ -292,30 +330,54 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE codegen[outIndex] = badCode } -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { numCodegens = len(w.codegenFreq) for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { numCodegens-- } - header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + return 3 + 5 + 5 + 4 + (3 * numCodegens) + w.codegenEncoding.bitLength(w.codegenFreq[:]) + int(w.codegenFreq[16])*2 + int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7 + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. 
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() size = header + - litEnc.bitLength(w.literalFreq) + - offEnc.bitLength(w.offsetFreq) + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + extraBits - return size, numCodegens } +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + // fixedSize returns the size of dynamically encoded data in bits. func (w *huffmanBitWriter) fixedSize(extraBits int) int { return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq) + - fixedOffsetEncoding.bitLength(w.offsetFreq) + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + extraBits } @@ -333,30 +395,36 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { } func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. w.bits |= uint64(c.code) << w.nbits - w.nbits += uint(c.len) + w.nbits += c.len if w.nbits >= 48 { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) - n += 6 - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + if w.err != nil { n = 0 + return } - w.nbytes = n + w.write(w.bytes[:n]) + n = 0 } + w.nbytes = n } // Write the header of a dynamic Huffman block to the output stream. @@ -412,6 +480,11 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { if w.err != nil { return } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } var flag int32 if isEof { flag = 1 @@ -426,6 +499,12 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { if w.err != nil { return } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + // Indicate that we are a fixed Huffman block var value int32 = 2 if isEof { @@ -439,29 +518,23 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { // is larger than the original bytes, the data will be written as a // stored block. // If the input is nil, the tokens will always be Huffman encoded. 
-func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { if w.err != nil { return } - tokens = append(tokens, endBlockMarker) - numLiterals, numOffsets := w.indexTokens(tokens) - + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate(tokens) var extraBits int storedSize, storable := w.storedSize(input) if storable { - // We only bother calculating the costs of the extra bits required by - // the length of offset fields (which will be the same for both fixed - // and dynamic encoding), if we need to compare those two encodings - // against stored encoding. - for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { - // First eight length codes have extra size = 0. - extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) - } - for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { - // First four offset codes have extra size = 0. - extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode&63]) - } + extraBits = w.extraBitSize() } // Figure out smallest code. @@ -500,7 +573,7 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { } // Write the tokens. - w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) } // writeBlockDynamic encodes a block using a dynamic Huffman table. @@ -508,72 +581,103 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { // histogram distribution. // If input is supplied and the compression savings are below 1/16th of the // input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { if w.err != nil { return } - tokens = append(tokens, endBlockMarker) - numLiterals, numOffsets := w.indexTokens(tokens) + sync = sync || eof + if sync { + tokens.AddEOB() + } - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + // We cannot reuse pure huffman table. + if w.lastHuffMan && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + if !sync { + tokens.Fill() + } + numLiterals, numOffsets := w.indexTokens(tokens, !sync) - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return + var size int + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table + newSize := w.lastHeader + tokens.EstimatedBits() + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. 
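// (Illustrative, editor's aside:) logReusePenalty is set in deflate.go to level+1
// for levels 1-6, to level for 7-9, and to 4 for ConstantCompression; the
// fresh-table estimate below is therefore inflated by 1/4 at level 1 (penalty 2)
// but only by 1/64 at level 5 (penalty 6), so reuse is favored most aggressively
// at the fastest levels.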
+ newSize += newSize >> (w.logReusePenalty & 31) + extra := w.extraBitSize() + reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra) + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + // Check if we get a reasonable size decrease. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + w.lastHeader = 0 + return + } } - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + // We want a new block/table + if w.lastHeader == 0 { + w.generate(tokens) + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + var numCodegens int + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize()) + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + w.lastHeader = 0 + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHeader, _ = w.headerSize() + w.lastHuffMan = false + } + if sync { + w.lastHeader = 0 + } // Write the tokens. - w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) + w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) } // indexTokens indexes a slice of tokens, and updates // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { - for i := range w.literalFreq { - w.literalFreq[i] = 0 - } - for i := range w.offsetFreq { - w.offsetFreq[i] = 0 - } +func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + copy(w.literalFreq[:], t.litHist[:]) + copy(w.literalFreq[256:], t.extraHist[:]) + copy(w.offsetFreq[:], t.offHist[:offsetCodeCount]) - if len(tokens) == 0 { + if t.n == 0 { return } - - // Only last token should be endBlockMarker. - if tokens[len(tokens)-1] == endBlockMarker { - w.literalFreq[endBlockMarker]++ - tokens = tokens[:len(tokens)-1] + if filled { + return maxNumLit, maxNumDist } - - // Create slices up to the next power of two to avoid bounds checks. 
- lits := w.literalFreq[:256] - offs := w.offsetFreq[:32] - lengths := w.literalFreq[lengthCodesStart:] - lengths = lengths[:32] - for _, t := range tokens { - if t < endBlockMarker { - lits[t.literal()]++ - continue - } - length := t.length() - offset := t.offset() - lengths[lengthCode(length)&31]++ - offs[offsetCode(offset)&31]++ - } - // get the number of literals numLiterals = len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { @@ -590,11 +694,14 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets w.offsetFreq[0] = 1 numOffsets = 1 } - w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) return } +func (w *huffmanBitWriter) generate(t *tokens) { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + // writeTokens writes a slice of tokens to the output. // codes for literal and offset encoding must be supplied. func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { @@ -626,8 +733,19 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() lengthCode := lengthCode(length) - w.writeCode(lengths[lengthCode&31]) - extraLengthBits := uint(lengthExtraBits[lengthCode&31]) + if false { + w.writeCode(lengths[lengthCode&31]) + } else { + // inlined + c := lengths[lengthCode&31] + w.bits |= uint64(c.code) << (w.nbits & 63) + w.nbits += c.len + if w.nbits >= 48 { + w.writeOutBits() + } + } + + extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) if extraLengthBits > 0 { extraLength := int32(length - lengthBase[lengthCode&31]) w.writeBits(extraLength, extraLengthBits) @@ -635,8 +753,18 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the offset offset := t.offset() offsetCode := offsetCode(offset) - w.writeCode(offs[offsetCode&31]) - extraOffsetBits := uint(offsetExtraBits[offsetCode&63]) + if false { + w.writeCode(offs[offsetCode&31]) + } else { + // inlined + c := offs[offsetCode&31] + w.bits |= uint64(c.code) << (w.nbits & 63) + w.nbits += c.len + if w.nbits >= 48 { + w.writeOutBits() + } + } + extraOffsetBits := uint16(offsetExtraBits[offsetCode&63]) if extraOffsetBits > 0 { extraOffset := int32(offset - offsetBase[offsetCode&63]) w.writeBits(extraOffset, extraOffsetBits) @@ -661,75 +789,93 @@ func init() { // writeBlockHuff encodes a block of bytes as either // Huffman encoded literals or uncompressed bytes if the // results only gains very little from compression. -func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if w.err != nil { return } // Clear histogram - for i := range w.literalFreq { + for i := range w.literalFreq[:] { w.literalFreq[i] = 0 } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } // Add everything as literals - histogram(input, w.literalFreq) + estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15 - w.literalFreq[endBlockMarker] = 1 + // Store bytes, if we don't get a reasonable improvement. 
+ ssize, storable := w.storedSize(input) + if storable && ssize < (estBits+estBits>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 + if w.lastHeader > 0 { + size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader) + estBits += estBits >> (w.logReusePenalty) - w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15) + if estBits < size { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + } - // Figure out smallest code. - // Always use dynamic Huffman or Store - var numCodegens int + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + if w.lastHeader == 0 { + w.literalFreq[endBlockMarker] = 1 + w.literalEncoding.generate(w.literalFreq[:numLiterals], 15) - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() } - // Huffman. 
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) encoding := w.literalEncoding.codes[:257] - n := w.nbytes for _, t := range input { // Bitwriting inlined, ~30% speedup c := encoding[t] - w.bits |= uint64(c.code) << w.nbits - w.nbits += uint(c.len) - if w.nbits < 48 { - continue - } - // Store 6 bytes - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) - n += 6 - if n < bufferFlushSize { - continue - } - w.write(w.bytes[:n]) - if w.err != nil { - return // Return early in the event of write failures + w.bits |= uint64(c.code) << ((w.nbits) & 63) + w.nbits += c.len + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n } - n = 0 } - w.nbytes = n - w.writeCode(encoding[endBlockMarker]) + if eof || sync { + w.writeCode(encoding[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index f65f79336..d0099599c 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -10,6 +10,12 @@ import ( "sort" ) +const ( + maxBitsLimit = 16 + // number of valid literals + literalCount = 286 +) + // hcode is a huffman code with a bit code and bit length. type hcode struct { code, len uint16 @@ -25,7 +31,7 @@ type huffmanEncoder struct { type literalNode struct { literal uint16 - freq int32 + freq uint16 } // A levelInfo describes the state of the constructed tree for a given depth. @@ -54,7 +60,11 @@ func (h *hcode) set(code uint16, length uint16) { h.code = code } -func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } +func reverseBits(number uint16, bitLength byte) uint16 { + return bits.Reverse16(number << ((16 - bitLength) & 15)) +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } func newHuffmanEncoder(size int) *huffmanEncoder { // Make capacity to next power of two. 
@@ -64,10 +74,10 @@ func newHuffmanEncoder(size int) *huffmanEncoder { // Generates a HuffmanCode corresponding to the fixed literal table func generateFixedLiteralEncoding() *huffmanEncoder { - h := newHuffmanEncoder(maxNumLit) + h := newHuffmanEncoder(literalCount) codes := h.codes var ch uint16 - for ch = 0; ch < maxNumLit; ch++ { + for ch = 0; ch < literalCount; ch++ { var bits uint16 var size uint16 switch { @@ -108,7 +118,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder { var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() -func (h *huffmanEncoder) bitLength(freq []int32) int { +func (h *huffmanEncoder) bitLength(freq []uint16) int { var total int for i, f := range freq { if f != 0 { @@ -118,8 +128,6 @@ func (h *huffmanEncoder) bitLength(freq []int32) int { return total } -const maxBitsLimit = 16 - // Return the number of literals assigned to each bit size in the Huffman encoding // // This method is only called when list.length >= 3 @@ -163,9 +171,9 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: list[1].freq, - nextCharFreq: list[2].freq, - nextPairFreq: list[0].freq + list[1].freq, + lastFreq: int32(list[1].freq), + nextCharFreq: int32(list[2].freq), + nextPairFreq: int32(list[0].freq) + int32(list[1].freq), } leafCounts[level][level] = 2 if level == 1 { @@ -197,7 +205,12 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { l.lastFreq = l.nextCharFreq // Lower leafCounts are the same of the previous node. leafCounts[level][level] = n - l.nextCharFreq = list[n].freq + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } } else { // The next item on this row is a pair from the previous row. // nextPairFreq isn't valid until we generate two @@ -273,12 +286,12 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // // freq An array of frequencies, in which frequency[i] gives the frequency of literal i. // maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { if h.freqcache == nil { // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. - // The largest of these is maxNumLit, so we allocate for that case. - h.freqcache = make([]literalNode, maxNumLit+1) + // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. + // The largest of these is literalCount, so we allocate for that case. + h.freqcache = make([]literalNode, literalCount+1) } list := h.freqcache[:len(freq)+1] // Number of non-zero literals @@ -345,3 +358,27 @@ func (s byFreq) Less(i, j int) bool { } func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// histogramSize accumulates a histogram of b in h. +// An estimated size in bits is returned. +// Unassigned values are assigned '1' in the histogram. +// len(h) must be >= 256, and h's elements must be all zeroes. 
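// (Illustrative example, editor's aside:) for b = []byte("aaab") with fill=false
// the histogram ends up as h['a']=3, h['b']=1 and invTotal = 1/4. 'a' contributes
// ceil(-log2(3/4)*3) = ceil(~1.25) = 2 bits and 'b' contributes ceil(-log2(1/4)*1)
// = 2 bits, so the estimate is int(4+0.99) = 4 bits; writeBlockHuff then adds 15
// bits for table overhead before comparing against a stored block.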
+func histogramSize(b []byte, h []uint16, fill bool) int { + h = h[:256] + for _, t := range b { + h[t]++ + } + invTotal := 1.0 / float64(len(b)) + shannon := 0.0 + single := math.Ceil(-math.Log2(invTotal)) + for i, v := range h[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } else if fill { + shannon += single + h[i] = 1 + } + } + return int(shannon + 0.99) +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 800d0ce9e..6dc5b5d06 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -9,6 +9,7 @@ package flate import ( "bufio" + "fmt" "io" "math/bits" "strconv" @@ -24,6 +25,8 @@ const ( maxNumLit = 286 maxNumDist = 30 numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false ) // Initialize the fixedHuffmanDecoder only once upon first use. @@ -104,8 +107,8 @@ const ( type huffmanDecoder struct { min int // the minimum code length - chunks *[huffmanNumChunks]uint32 // chunks as described above - links [][]uint32 // overflow links + chunks *[huffmanNumChunks]uint16 // chunks as described above + links [][]uint16 // overflow links linkMask uint32 // mask the width of the link table } @@ -121,7 +124,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { const sanity = false if h.chunks == nil { - h.chunks = &[huffmanNumChunks]uint32{} + h.chunks = &[huffmanNumChunks]uint16{} } if h.min != 0 { *h = huffmanDecoder{chunks: h.chunks, links: h.links} @@ -169,6 +172,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { // accept degenerate single-code codings. See also // TestDegenerateHuffmanCoding. if code != 1<<uint(max) && !(code == 1 && max == 1) { + if debugDecode { + fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)") + } return false } @@ -185,7 +191,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { // create link tables link := nextcode[huffmanChunkBits+1] >> 1 if cap(h.links) < huffmanNumChunks-link { - h.links = make([][]uint32, huffmanNumChunks-link) + h.links = make([][]uint16, huffmanNumChunks-link) } else { h.links = h.links[:huffmanNumChunks-link] } @@ -196,9 +202,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { if sanity && h.chunks[reverse] != 0 { panic("impossible: overwriting existing chunk") } - h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1)) + h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1)) if cap(h.links[off]) < numLinks { - h.links[off] = make([]uint32, numLinks) + h.links[off] = make([]uint16, numLinks) } else { links := h.links[off][:0] h.links[off] = links[:numLinks] @@ -214,7 +220,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { } code := nextcode[n] nextcode[n]++ - chunk := uint32(i<<huffmanValueShift | n) + chunk := uint16(i<<huffmanValueShift | n) reverse := int(bits.Reverse16(uint16(code))) reverse >>= uint(16 - n) if n <= huffmanChunkBits { @@ -347,6 +353,9 @@ func (f *decompressor) nextBlock() { f.huffmanBlock() default: // 3 is reserved. 
+ if debugDecode { + fmt.Println("reserved data block encountered") + } f.err = CorruptInputError(f.roffset) } } @@ -425,11 +434,17 @@ func (f *decompressor) readHuffman() error { } nlit := int(f.b&0x1F) + 257 if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } return CorruptInputError(f.roffset) } f.b >>= 5 ndist := int(f.b&0x1F) + 1 if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } return CorruptInputError(f.roffset) } f.b >>= 5 @@ -453,6 +468,9 @@ func (f *decompressor) readHuffman() error { f.codebits[codeOrder[i]] = 0 } if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } return CorruptInputError(f.roffset) } @@ -480,6 +498,9 @@ func (f *decompressor) readHuffman() error { rep = 3 nb = 2 if i == 0 { + if debugDecode { + fmt.Println("i==0") + } return CorruptInputError(f.roffset) } b = f.bits[i-1] @@ -494,6 +515,9 @@ func (f *decompressor) readHuffman() error { } for f.nb < nb { if err := f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits:", err) + } return err } } @@ -501,6 +525,9 @@ func (f *decompressor) readHuffman() error { f.b >>= nb f.nb -= nb if i+rep > n { + if debugDecode { + fmt.Println("i+rep > n", i, rep, n) + } return CorruptInputError(f.roffset) } for j := 0; j < rep; j++ { @@ -510,6 +537,9 @@ func (f *decompressor) readHuffman() error { } if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + if debugDecode { + fmt.Println("init2 failed") + } return CorruptInputError(f.roffset) } @@ -587,12 +617,18 @@ readLiteral: length = 258 n = 0 default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } f.err = CorruptInputError(f.roffset) return } if n > 0 { for f.nb < n { if err = f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } f.err = err return } @@ -606,6 +642,9 @@ readLiteral: if f.hd == nil { for f.nb < 5 { if err = f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } f.err = err return } @@ -615,6 +654,9 @@ readLiteral: f.nb -= 5 } else { if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } f.err = err return } @@ -629,6 +671,9 @@ readLiteral: extra := (dist & 1) << nb for f.nb < nb { if err = f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } f.err = err return } @@ -638,12 +683,18 @@ readLiteral: f.nb -= nb dist = 1<<(nb+1) + 1 + extra default: + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } f.err = CorruptInputError(f.roffset) return } // No check on length; encoding can be prescient. 
if dist > f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } f.err = CorruptInputError(f.roffset) return } @@ -688,6 +739,9 @@ func (f *decompressor) dataBlock() { n := int(f.buf[0]) | int(f.buf[1])<<8 nn := int(f.buf[2]) | int(f.buf[3])<<8 if uint16(nn) != uint16(^n) { + if debugDecode { + fmt.Println("uint16(nn) != uint16(^n)", nn, ^n) + } f.err = CorruptInputError(f.roffset) return } @@ -789,6 +843,9 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { if n == 0 { f.b = b f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } f.err = CorruptInputError(f.roffset) return 0, f.err } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go new file mode 100644 index 000000000..20de8f11f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -0,0 +1,174 @@ +package flate + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL1 struct { + fastGen + table [tableSize]tableEntry +} + +// EncodeL1 uses a similar algorithm to level 1 +func (e *fastEncL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3232(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hash(cv) + candidate = e.table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(uint32(now)) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. 
We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + // Save the match found + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + // Index first pair after match end. + if int(s+l+4) < len(src) { + cv := load3232(src, s) + e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hash(uint32(x)) + e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} + x >>= 16 + currHash := hash(uint32(x)) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 000000000..7c824431e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,199 @@ +package flate + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. 
The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3232(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hash4u(cv, bTableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash4u(uint32(now), bTableBits) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + break + } + cv = uint32(now) + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+4) < len(src) { + cv := load3232(src, s) + e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, int32(i)) + nextHash := hash4u(uint32(x), bTableBits) + e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)} + // Skip one + x >>= 16 + nextHash = hash4u(uint32(x), bTableBits) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)} + // Skip one + x >>= 16 + nextHash = hash4u(uint32(x), bTableBits) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hash4u(uint32(x), bTableBits) + prevHash2 := hash4u(uint32(x>>8), bTableBits) + e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)} + currHash := hash4u(uint32(x>>16), bTableBits) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != candidate.val { + cv = uint32(x >> 24) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 000000000..4153d24c9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,225 @@ +package flate + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3232(src, s) + for { + const skipLog = 6 + nextS := s + var candidate tableEntry + for { + nextHash := hash(cv) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load3232(src, nextS) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + + // Check both candidates + candidate = candidates.Cur + offset := s - (candidate.offset - e.cur) + if cv == candidate.val { + if offset > maxMatchOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + o2 := s - (candidates.Prev.offset - e.cur) + if cv != candidates.Prev.val || o2 > maxMatchOffset { + break + } + // Both match and are valid, pick longest. + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. 
+ // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash := hash(cv) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash] + cv = uint32(x) + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 000000000..c689ac771 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,210 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hash4x64(cv, tableBits) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == lCandidate.val { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && lCandidate.val == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. 
+ l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + if false { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hash4u(t2.val, tableBits)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hash4u(t2.val, tableBits)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hash4x64(x, tableBits) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} + e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 000000000..14a235612 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,276 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hash4x64(cv, tableBits) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hash4x64(next, tableBits) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == lCandidate.Cur.val { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + + // If the next long is a candidate, use that... 
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if lCandidate.Cur.val == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) |
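
The new level1.go through level5.go files above add dedicated fast encoders to the vendored flate package, which is a drop-in replacement for the standard library's compress/flate. The following is a minimal sketch, not part of this diff, assuming the usual mapping of compression levels 1-5 to the fastEncL1..fastEncL5 types added here (that wiring lives outside the hunks shown); it exercises each level through the public API and round-trips the stream through the updated inflate path. All calls used (flate.NewWriter, flate.NewReader, ioutil.ReadAll) are existing APIs; only the level-to-encoder mapping is an assumption.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Repetitive input so the hash-table matchers have something to find.
	payload := bytes.Repeat([]byte("the quick brown fox jumps over the lazy dog "), 64)

	for level := 1; level <= 5; level++ {
		var compressed bytes.Buffer

		// Assumption: levels 1-5 are served by the fastEncL1..fastEncL5
		// encoders introduced in this diff.
		w, err := flate.NewWriter(&compressed, level)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := w.Write(payload); err != nil {
			log.Fatal(err)
		}
		if err := w.Close(); err != nil {
			log.Fatal(err)
		}

		// Decompress and verify the round trip through the updated decoder.
		r := flate.NewReader(bytes.NewReader(compressed.Bytes()))
		out, err := ioutil.ReadAll(r)
		r.Close()
		if err != nil || !bytes.Equal(out, payload) {
			log.Fatalf("level %d: round trip failed: %v", level, err)
		}
		fmt.Printf("level %d: %d -> %d bytes\n", level, len(payload), compressed.Len())
	}
}

Higher levels trade encode speed for denser matching (larger or chained hash tables, as in the fastEncL3-L5 types above), so the printed sizes should shrink slightly as the level increases on repetitive input.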