-rw-r--r--  .cirrus.yml  2
-rwxr-xr-x  API.md  4
-rw-r--r--  CONTRIBUTING.md  28
-rw-r--r--  Makefile  9
-rw-r--r--  cmd/podman/common.go  9
-rw-r--r--  cmd/podman/create.go  10
-rw-r--r--  cmd/podman/create_cli.go  2
-rw-r--r--  cmd/podman/login.go  42
-rw-r--r--  cmd/podman/main.go  3
-rw-r--r--  cmd/podman/shared/container.go  14
-rw-r--r--  cmd/podman/shared/funcs.go  4
-rw-r--r--  cmd/podman/varlink/io.podman.varlink  2
-rw-r--r--  completions/bash/podman  39
-rw-r--r--  docs/libpod.conf.5.md  3
-rw-r--r--  docs/podman-create.1.md  8
-rw-r--r--  docs/podman-run.1.md  8
-rwxr-xr-x  hack/install_catatonit.sh  15
-rw-r--r--  libpod.conf  3
-rw-r--r--  libpod/boltdb_state_internal.go  14
-rw-r--r--  libpod/common_test.go  46
-rw-r--r--  libpod/container.go  5
-rw-r--r--  libpod/container_easyjson.go  12
-rw-r--r--  libpod/container_graph_test.go  160
-rw-r--r--  libpod/container_internal.go  16
-rw-r--r--  libpod/image/parts.go  12
-rw-r--r--  libpod/lock/in_memory_locks.go  91
-rw-r--r--  libpod/lock/lock.go  61
-rw-r--r--  libpod/lock/shm/shm_lock.c  452
-rw-r--r--  libpod/lock/shm/shm_lock.go  216
-rw-r--r--  libpod/lock/shm/shm_lock.h  46
-rw-r--r--  libpod/lock/shm/shm_lock_test.go  278
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go  102
-rw-r--r--  libpod/lock/shm_lock_manager_unsupported.go  29
-rw-r--r--  libpod/pod.go  7
-rw-r--r--  libpod/pod_easyjson.go  12
-rw-r--r--  libpod/pod_internal.go  21
-rw-r--r--  libpod/runtime.go  96
-rw-r--r--  libpod/runtime_ctr.go  32
-rw-r--r--  libpod/runtime_pod_linux.go  15
-rw-r--r--  libpod/runtime_volume_linux.go  9
-rw-r--r--  libpod/state_test.go  908
-rw-r--r--  libpod/volume.go  10
-rw-r--r--  pkg/spec/createconfig.go  31
-rw-r--r--  pkg/spec/spec.go  2
-rw-r--r--  pkg/sysinfo/README.md (renamed from vendor/github.com/docker/docker/pkg/sysinfo/README.md)  0
-rw-r--r--  pkg/sysinfo/numcpu.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go)  0
-rw-r--r--  pkg/sysinfo/numcpu_linux.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go)  0
-rw-r--r--  pkg/sysinfo/numcpu_windows.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go)  0
-rw-r--r--  pkg/sysinfo/sysinfo.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go)  0
-rw-r--r--  pkg/sysinfo/sysinfo_linux.go  254
-rw-r--r--  pkg/sysinfo/sysinfo_linux.go~ (renamed from vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go)  0
-rw-r--r--  pkg/sysinfo/sysinfo_linux_test.go  104
-rw-r--r--  pkg/sysinfo/sysinfo_solaris.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go)  0
-rw-r--r--  pkg/sysinfo/sysinfo_test.go  26
-rw-r--r--  pkg/sysinfo/sysinfo_unix.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go)  0
-rw-r--r--  pkg/sysinfo/sysinfo_windows.go (renamed from vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go)  0
-rw-r--r--  test/e2e/run_test.go  12
-rw-r--r--  troubleshooting.md  10
-rw-r--r--  vendor.conf  15
-rw-r--r--  vendor/github.com/containers/buildah/common.go  14
-rw-r--r--  vendor/github.com/containers/buildah/config.go  3
-rw-r--r--  vendor/github.com/containers/buildah/image.go  4
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go  7
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go  5
-rw-r--r--  vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go  8
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go  4
-rw-r--r--  vendor/github.com/containers/buildah/run.go  16
-rw-r--r--  vendor/github.com/containers/buildah/unshare/unshare.c  3
-rw-r--r--  vendor/github.com/containers/buildah/util.go  23
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go  50
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf  14
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go  145
-rw-r--r--  vendor/github.com/containers/image/directory/directory_dest.go  5
-rw-r--r--  vendor/github.com/containers/image/directory/directory_src.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/docker_client.go  35
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_dest.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/dest.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/src.go  5
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_dest.go  5
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_src.go  5
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_dest.go  5
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_src.go  5
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift.go  10
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_dest.go  9
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_src.go  9
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/default.go  2
-rw-r--r--  vendor/github.com/containers/image/pkg/compression/compression.go  4
-rw-r--r--  vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go  35
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go  28
-rw-r--r--  vendor/github.com/containers/image/tarball/tarball_src.go  10
-rw-r--r--  vendor/github.com/containers/image/types/types.go  4
-rw-r--r--  vendor/github.com/containers/image/vendor.conf  10
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/device_setup.go  2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go  11
-rw-r--r--  vendor/github.com/containers/storage/layers.go  8
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go  9
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_110.go  11
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_19.go  3
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go  2
-rw-r--r--  vendor/github.com/containers/storage/store.go  2
-rw-r--r--  vendor/github.com/containers/storage/vendor.conf  3
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/example_changes.go (renamed from vendor/github.com/containers/storage/pkg/archive/example_changes.go)  6
-rw-r--r--  vendor/github.com/docker/docker/profiles/seccomp/generate.go  32
-rw-r--r--  vendor/github.com/klauspost/compress/LICENSE  27
-rw-r--r--  vendor/github.com/klauspost/compress/README.md  160
-rw-r--r--  vendor/github.com/klauspost/compress/flate/copy.go  32
-rw-r--r--  vendor/github.com/klauspost/compress/flate/crc32_amd64.go  42
-rw-r--r--  vendor/github.com/klauspost/compress/flate/crc32_amd64.s  214
-rw-r--r--  vendor/github.com/klauspost/compress/flate/crc32_noasm.go  35
-rw-r--r--  vendor/github.com/klauspost/compress/flate/deflate.go  1353
-rw-r--r--  vendor/github.com/klauspost/compress/flate/dict_decoder.go  184
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go  701
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_code.go  344
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go  880
-rw-r--r--  vendor/github.com/klauspost/compress/flate/reverse_bits.go  48
-rw-r--r--  vendor/github.com/klauspost/compress/flate/snappy.go  900
-rw-r--r--  vendor/github.com/klauspost/compress/flate/token.go  115
-rw-r--r--  vendor/github.com/klauspost/cpuid/LICENSE  22
-rw-r--r--  vendor/github.com/klauspost/cpuid/README.md  145
-rw-r--r--  vendor/github.com/klauspost/cpuid/cpuid.go  1040
-rw-r--r--  vendor/github.com/klauspost/cpuid/cpuid_386.s  42
-rw-r--r--  vendor/github.com/klauspost/cpuid/cpuid_amd64.s  42
-rw-r--r--  vendor/github.com/klauspost/cpuid/detect_intel.go  17
-rw-r--r--  vendor/github.com/klauspost/cpuid/detect_ref.go  23
-rw-r--r--  vendor/github.com/klauspost/cpuid/generate.go  4
-rw-r--r--  vendor/github.com/klauspost/pgzip/GO_LICENSE  27
-rw-r--r--  vendor/github.com/klauspost/pgzip/LICENSE  22
-rw-r--r--  vendor/github.com/klauspost/pgzip/README.md  136
-rw-r--r--  vendor/github.com/klauspost/pgzip/gunzip.go  573
-rw-r--r--  vendor/github.com/klauspost/pgzip/gzip.go  501
-rw-r--r--  vendor/github.com/opencontainers/go-digest/LICENSE (renamed from vendor/github.com/opencontainers/go-digest/LICENSE.code)  0
-rw-r--r--  vendor/github.com/opencontainers/go-digest/README.md  2
-rw-r--r--  vendor/github.com/opencontainers/go-digest/algorithm.go  40
-rw-r--r--  vendor/github.com/opencontainers/go-digest/digest.go  42
-rw-r--r--  vendor/github.com/opencontainers/runc/README.md  17
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/README.md  8
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go  61
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/config.go  30
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go  6
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go  16
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go  13
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md  6
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c  77
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go  22
-rw-r--r--  vendor/github.com/opencontainers/runc/vendor.conf  2
-rw-r--r--  vendor/golang.org/x/net/html/atom/atom.go  78
-rw-r--r--  vendor/golang.org/x/net/html/atom/gen.go  648
-rw-r--r--  vendor/golang.org/x/net/html/atom/table.go  713
-rw-r--r--  vendor/golang.org/x/net/html/charset/charset.go  257
-rw-r--r--  vendor/golang.org/x/net/html/const.go  102
-rw-r--r--  vendor/golang.org/x/net/html/doc.go  106
-rw-r--r--  vendor/golang.org/x/net/html/doctype.go  156
-rw-r--r--  vendor/golang.org/x/net/html/entity.go  2253
-rw-r--r--  vendor/golang.org/x/net/html/escape.go  258
-rw-r--r--  vendor/golang.org/x/net/html/foreign.go  226
-rw-r--r--  vendor/golang.org/x/net/html/node.go  193
-rw-r--r--  vendor/golang.org/x/net/html/parse.go  2094
-rw-r--r--  vendor/golang.org/x/net/html/render.go  271
-rw-r--r--  vendor/golang.org/x/net/html/token.go  1219
-rw-r--r--  vendor/golang.org/x/sync/LICENSE  27
-rw-r--r--  vendor/golang.org/x/sync/PATENTS  22
-rw-r--r--  vendor/golang.org/x/sync/README.md  18
-rw-r--r--  vendor/golang.org/x/sync/semaphore/semaphore.go  127
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/README.md  11
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/format.go  84
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pb.go  193
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go  2
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go  8
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go  6
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go  4
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go  118
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pool.go  62
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go  16
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go  13
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/runecount.go  2
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go  7
-rw-r--r--  vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go  13
178 files changed, 20026 insertions, 1152 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index d1029d554..199c2a533 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -115,7 +115,7 @@ build_each_commit_task:
matrix:
image_name: "fedora-29-libpod-9afa57a9"
- timeout_in: 20m
+ timeout_in: 30m
script:
- $SCRIPT_BASE/setup_environment.sh
diff --git a/API.md b/API.md
index 51787496c..3722c2864 100755
--- a/API.md
+++ b/API.md
@@ -1169,6 +1169,10 @@ image [string](https://godoc.org/builtin#string)
image_id [string](https://godoc.org/builtin#string)
+init [bool](https://godoc.org/builtin#bool)
+
+init_path [string](https://godoc.org/builtin#string)
+
builtin_imgvolumes [[]string](#[]string)
id_mappings [IDMappingOptions](#IDMappingOptions)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 32ed94ad4..26e5473b2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -226,3 +226,31 @@ and
tracking system.
[owners]: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md#owners
+
+
+### Bot Interactions
+
+The primary human interface is through comments in pull requests. Some of these are outlined
+below, along with their meaning and intended usage. Some of them require that the comment
+author hold special privileges on the GitHub repository. Others can be used by anyone.
+
+* ``/close``: Closes an issue or PR.
+
+* ``/approve``: Mark a PR as appropriate to the project and as having met
+  all the contribution criteria above. Adds the *approved* label, marking
+  it as ready for review and possible future merging.
+
+* ``/lgtm``: A literal "stamp of approval", signaling okay-to-merge. This causes
+  the bot to add the *lgtm* label, then attempt a merge. In other words: never,
+  ever comment ``/lgtm`` unless a PR has actually been fully reviewed. The bot
+  isn't too smart about these things, and could merge unintentionally. Instead,
+  just write ``LGTM``, or spell it out.
+
+* ``[skip ci]``: When present in the HEAD commit message, this causes Cirrus CI to
+  ***NOT*** execute tests on the PR. This is useful in two cases: 1) You're still
+  working and don't want to waste resources. 2) You haven't modified any code that
+  would be exercised by the tests - for example, documentation updates (outside of code).
+
+[The complete list may be found on the command-help page.](https://prow.k8s.io/command-help)
+However, not all commands are implemented for this repository. If in doubt, ask a maintainer.
diff --git a/Makefile b/Makefile
index 316815f00..3fd9b4ed4 100644
--- a/Makefile
+++ b/Makefile
@@ -157,10 +157,10 @@ dbuild: libpodimage
${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} make all
test: libpodimage
- ${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e CGROUP_MANAGER=cgroupfs -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make clean all localunit localintegration
+ ${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e CGROUP_MANAGER=cgroupfs -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make clean all localunit install.catatonit localintegration
integration: libpodimage
- ${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e CGROUP_MANAGER=cgroupfs -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make clean all localintegration
+ ${CONTAINER_RUNTIME} run -e STORAGE_OPTIONS="--storage-driver=vfs" -e TESTFLAGS -e CGROUP_MANAGER=cgroupfs -e TRAVIS -t --privileged --rm -v ${CURDIR}:/go/src/${PROJECT} ${LIBPOD_IMAGE} make clean all install.catatonit localintegration
integration.fedora:
DIST=Fedora sh .papr_prepare.sh
@@ -204,7 +204,10 @@ vagrant-check:
binaries: varlink_generate easyjson_generate podman
-test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp test/goecho/goecho
+install.catatonit:
+ ./hack/install_catatonit.sh
+
+test-binaries: test/bin2img/bin2img test/copyimg/copyimg test/checkseccomp/checkseccomp test/goecho/goecho install.catatonit
MANPAGES_MD ?= $(wildcard docs/*.md pkg/*/docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 8404a29b8..0fc9a6acc 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -321,6 +321,15 @@ var createFlags = []cli.Flag{
Value: "bind",
},
cli.BoolFlag{
+ Name: "init",
+ Usage: "Run an init binary inside the container that forwards signals and reaps processes",
+ },
+ cli.StringFlag{
+ Name: "init-path",
+		// Do not set a default via the Value field; user input is detected by checking for a non-empty string.
+ Usage: fmt.Sprintf("Path to the container-init binary (default: %q)", libpod.DefaultInitPath),
+ },
+ cli.BoolFlag{
Name: "interactive, i",
Usage: "Keep STDIN open even if not attached",
},
diff --git a/cmd/podman/create.go b/cmd/podman/create.go
index dae429047..395a64b3b 100644
--- a/cmd/podman/create.go
+++ b/cmd/podman/create.go
@@ -809,6 +809,16 @@ func parseCreateOpts(ctx context.Context, c *cli.Context, runtime *libpod.Runtim
Syslog: c.GlobalBool("syslog"),
}
+ if c.Bool("init") {
+ initPath := c.String("init-path")
+ if initPath == "" {
+ initPath = runtime.GetConfig().InitPath
+ }
+ if err := config.AddContainerInitBinary(initPath); err != nil {
+ return nil, err
+ }
+ }
+
if config.Privileged {
config.LabelOpts = label.DisableSecOpt()
} else {
diff --git a/cmd/podman/create_cli.go b/cmd/podman/create_cli.go
index 2a820fa43..1a0830f2e 100644
--- a/cmd/podman/create_cli.go
+++ b/cmd/podman/create_cli.go
@@ -7,7 +7,7 @@ import (
"strings"
cc "github.com/containers/libpod/pkg/spec"
- "github.com/docker/docker/pkg/sysinfo"
+ "github.com/containers/libpod/pkg/sysinfo"
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index 4452651f8..fc7b39ed8 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -62,12 +62,18 @@ func loginCmd(c *cli.Context) error {
return errors.Errorf("too many arguments, login takes only 1 argument")
}
if len(args) == 0 {
- return errors.Errorf("registry must be given")
+		return errors.Errorf("please specify a registry to log in to")
}
server := registryFromFullName(scrubServer(args[0]))
authfile := getAuthFile(c.String("authfile"))
sc := common.GetSystemContext("", authfile, false)
+ if c.IsSet("tls-verify") {
+ sc.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
+ }
+ if c.String("cert-dir") != "" {
+ sc.DockerCertPath = c.String("cert-dir")
+ }
if c.IsSet("get-login") {
user, err := config.GetUserLoggedIn(sc, server)
@@ -87,39 +93,25 @@ func loginCmd(c *cli.Context) error {
// username of user logged in to server (if one exists)
userFromAuthFile, passFromAuthFile, err := config.GetAuthentication(sc, server)
if err != nil {
- return errors.Wrapf(err, "error getting logged-in user")
+ return errors.Wrapf(err, "error reading auth file")
}
ctx := getContext()
-
- var (
- username string
- password string
- )
-
- if userFromAuthFile != "" {
- username = userFromAuthFile
- password = passFromAuthFile
+	// If neither a username nor a password is specified, try to use existing credentials.
+ if c.String("username") == "" && c.String("password") == "" {
fmt.Println("Authenticating with existing credentials...")
- if err := docker.CheckAuth(ctx, sc, username, password, server); err == nil {
+ if err := docker.CheckAuth(ctx, sc, userFromAuthFile, passFromAuthFile, server); err == nil {
fmt.Println("Existing credentials are valid. Already logged in to", server)
return nil
}
fmt.Println("Existing credentials are invalid, please enter valid username and password")
}
- username, password, err = getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile)
+ username, password, err := getUserAndPass(c.String("username"), c.String("password"), userFromAuthFile)
if err != nil {
return errors.Wrapf(err, "error getting username and password")
}
- if c.IsSet("tls-verify") {
- sc.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
- }
- if c.String("cert-dir") != "" {
- sc.DockerCertPath = c.String("cert-dir")
- }
-
if err = docker.CheckAuth(ctx, sc, username, password, server); err == nil {
// Write the new credentials to the authfile
if err = config.SetAuthentication(sc, server, username, password); err != nil {
@@ -131,14 +123,15 @@ func loginCmd(c *cli.Context) error {
fmt.Println("Login Succeeded!")
return nil
case docker.ErrUnauthorizedForCredentials:
- return errors.Errorf("error logging into %q: invalid username/password\n", server)
+ return errors.Errorf("error logging into %q: invalid username/password", server)
default:
return errors.Wrapf(err, "error authenticating creds for %q", server)
}
}
// getUserAndPass gets the username and password from STDIN if not given
-// using the -u and -p flags
+// using the -u and -p flags. If the username prompt is left empty, the
+// displayed userFromAuthFile will be used instead.
func getUserAndPass(username, password, userFromAuthFile string) (string, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
@@ -152,7 +145,10 @@ func getUserAndPass(username, password, userFromAuthFile string) (string, string
if err != nil {
return "", "", errors.Wrapf(err, "error reading username")
}
- // If no username provided, use userFromAuthFile instead.
+	// If the user just hit enter, use the displayed user from the
+	// authentication file. This allows a lazy
+	// `$ podman login -p $NEW_PASSWORD` without specifying the
+	// user.
if strings.TrimSpace(username) == "" {
username = userFromAuthFile
}
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 7ef22a93b..43804ee35 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -161,6 +161,9 @@ func main() {
logrus.Info("running as rootless")
}
+ // Be sure we can create directories with 0755 mode.
+ syscall.Umask(0022)
+
if logLevel == "debug" {
debug = true
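
The umask reset above matters because the mode passed to mkdir(2) is masked by the process umask: a restrictive inherited umask would silently strip group and other permission bits from directories podman creates. A minimal, Linux-only Go sketch of the effect (illustrative only, not part of this commit; the /tmp paths are hypothetical):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Simulate a restrictive umask inherited from the calling shell.
	syscall.Umask(0077)
	os.Mkdir("/tmp/demo-restrictive", 0755) // actually created as 0700

	// The reset podman's main() performs: 0755 now really means 0755.
	syscall.Umask(0022)
	os.Mkdir("/tmp/demo-expected", 0755)

	for _, p := range []string{"/tmp/demo-restrictive", "/tmp/demo-expected"} {
		if fi, err := os.Stat(p); err == nil {
			fmt.Printf("%s: %v\n", p, fi.Mode().Perm())
		}
	}
}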
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index 30beb4a49..6c7d8eb52 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/google/shlex"
"io"
"os"
"path/filepath"
@@ -640,6 +641,14 @@ func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtim
// GenerateRunlabelCommand generates the command that will eventually be executed by podman
func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string) ([]string, []string, error) {
+ // If no name is provided, we use the image's basename instead
+ if name == "" {
+ baseName, err := image.GetImageBaseName(imageName)
+ if err != nil {
+ return nil, nil, err
+ }
+ name = baseName
+ }
// The user provided extra arguments that need to be tacked onto the label's command
if len(extraArgs) > 0 {
runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " "))
@@ -665,7 +674,10 @@ func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]s
return ""
}
newS := os.Expand(strings.Join(cmd, " "), envmapper)
- cmd = strings.Split(newS, " ")
+ cmd, err = shlex.Split(newS)
+ if err != nil {
+ return nil, nil, err
+ }
return cmd, env, nil
}
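
The switch from strings.Split to shlex.Split above matters once the expanded runlabel command contains shell quoting. A small self-contained Go sketch of the difference (the command string is illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/google/shlex"
)

func main() {
	expanded := `podman run --name "my container" alpine`

	// Naive splitting tears the quoted argument into two tokens.
	fmt.Println(strings.Split(expanded, " "))

	// shlex honors shell quoting: "my container" stays one argument.
	tokens, err := shlex.Split(expanded)
	if err != nil {
		panic(err)
	}
	fmt.Println(tokens)
}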
diff --git a/cmd/podman/shared/funcs.go b/cmd/podman/shared/funcs.go
index 8770b8ec0..70d041fd2 100644
--- a/cmd/podman/shared/funcs.go
+++ b/cmd/podman/shared/funcs.go
@@ -65,6 +65,8 @@ func GenerateCommand(command, imageName, name string) ([]string, error) {
switch arg {
case "IMAGE":
newArg = imageName
+ case "$IMAGE":
+ newArg = imageName
case "IMAGE=IMAGE":
newArg = fmt.Sprintf("IMAGE=%s", imageName)
case "IMAGE=$IMAGE":
@@ -75,6 +77,8 @@ func GenerateCommand(command, imageName, name string) ([]string, error) {
newArg = fmt.Sprintf("NAME=%s", name)
case "NAME=$NAME":
newArg = fmt.Sprintf("NAME=%s", name)
+ case "$NAME":
+ newArg = name
default:
newArg = arg
}
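
For illustration, a simplified, self-contained stand-in for the token substitution performed by the switch above (this is a sketch, not the actual GenerateCommand helper; the template and values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// substitute replaces the IMAGE/$IMAGE and NAME/$NAME placeholder
// tokens of a runlabel template with concrete values.
func substitute(command, imageName, name string) []string {
	var out []string
	for _, arg := range strings.Split(command, " ") {
		switch arg {
		case "IMAGE", "$IMAGE":
			arg = imageName
		case "NAME", "$NAME":
			arg = name
		case "IMAGE=IMAGE", "IMAGE=$IMAGE":
			arg = fmt.Sprintf("IMAGE=%s", imageName)
		case "NAME=NAME", "NAME=$NAME":
			arg = fmt.Sprintf("NAME=%s", name)
		}
		out = append(out, arg)
	}
	return out
}

func main() {
	fmt.Println(substitute("podman run -d --name NAME $IMAGE",
		"registry.example.com/foo:v1", "mycontainer"))
	// [podman run -d --name mycontainer registry.example.com/foo:v1]
}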
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index c1b7c703a..4e8b69faf 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -211,6 +211,8 @@ type Create (
hostname: string,
image: string,
image_id: string,
+ init: bool,
+ init_path: string,
builtin_imgvolumes: []string,
id_mappings: IDMappingOptions,
image_volume_type: string,
diff --git a/completions/bash/podman b/completions/bash/podman
index a85574d10..d65f54690 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -47,10 +47,10 @@ __podman_pods() {
format='{{.ID}}'
shift
elif [ "$1" = "--name" ] ; then
- format='{{.Names}}'
+ format='{{.Name}}'
shift
else
- format='{{.Names}}'
+ format='{{.Name}}'
fi
__podman_q pod ps --format "$format" "$@"
}
@@ -1194,7 +1194,14 @@ _podman_import() {
COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
;;
*)
- __podman_list_images
+ local counter=$( __podman_pos_first_nonflag "$options_with_args" )
+ if [ "$cword" -eq "$((counter))" ]; then
+ __podman_complete_images --repo --tag
+ return
+ else
+ _filedir
+ return
+ fi
;;
esac
}
@@ -2271,10 +2278,26 @@ _podman_generate_kube() {
local options_with_args=""
local boolean_options="
+ -h
+ --help
-s
--service
"
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "
+ $(__podman_containers --all)
+ $(__podman_pods)
+ " -- "$cur" ) )
+ __ltrim_colon_completions "$cur"
+ ;;
+ esac
+}
+
_podman_play_kube() {
local options_with_args="
--authfile
@@ -2291,6 +2314,16 @@ _podman_play_kube() {
--tls-verify
"
+ case "$cur" in
+ -*)
+ COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+ ;;
+ *)
+ _filedir
+ ;;
+ esac
+}
+
_podman_container_runlabel() {
local options_with_args="
--authfile
diff --git a/docs/libpod.conf.5.md b/docs/libpod.conf.5.md
index d63baeb88..c02d247fb 100644
--- a/docs/libpod.conf.5.md
+++ b/docs/libpod.conf.5.md
@@ -24,6 +24,9 @@ libpod to manage containers.
**cgroup_manager**=""
Specify the CGroup Manager to use; valid values are "systemd" and "cgroupfs"
+**init_path**=""
+  Path to the container-init binary, which forwards signals and reaps processes within containers. Note that the container-init binary will only be used when the `--init` flag of podman-create or podman-run is set.
+
**hooks_dir**=["*path*", ...]
Each `*.json` file in the path configures a hook for Podman containers. For more details on the syntax of the JSON files and the semantics of hook injection, see `oci-hooks(5)`. Podman and libpod currently support both the 1.0.0 and 0.1.0 hook schemas, although the 0.1.0 schema is deprecated.
diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md
index 97d6e77b1..3a75a4b00 100644
--- a/docs/podman-create.1.md
+++ b/docs/podman-create.1.md
@@ -276,6 +276,14 @@ tmpfs: The volume is mounted onto the container as a tmpfs, which allows the use
content that disappears when the container is stopped.
ignore: All volumes are just ignored and no action is taken.
+**--init**
+
+Run an init inside the container that forwards signals and reaps processes.
+
+**--init-path**=""
+
+Path to the container-init binary.
+
**-i**, **--interactive**=*true*|*false*
Keep STDIN open even if not attached. The default is *false*.
diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md
index c0a466a9c..971b8829a 100644
--- a/docs/podman-run.1.md
+++ b/docs/podman-run.1.md
@@ -285,6 +285,14 @@ the container for the volumes.
content that disappears when the container is stopped.
- `ignore`: All volumes are just ignored and no action is taken.
+**--init**
+
+Run an init inside the container that forwards signals and reaps processes.
+
+**--init-path**=""
+
+Path to the container-init binary.
+
**-i**, **--interactive**=*true*|*false*
Keep STDIN open even if not attached. The default is *false*.
diff --git a/hack/install_catatonit.sh b/hack/install_catatonit.sh
new file mode 100755
index 000000000..e5532a200
--- /dev/null
+++ b/hack/install_catatonit.sh
@@ -0,0 +1,15 @@
+#!/bin/bash -e
+BASE_PATH="/usr/libexec/podman"
+CATATONIT_PATH="${BASE_PATH}/catatonit"
+CATATONIT_VERSION="v0.1.3"
+
+if [ -f "$CATATONIT_PATH" ]; then
+ echo "skipping ... catatonit is already installed"
+else
+ echo "downloading catatonit to $CATATONIT_PATH"
+ curl -o catatonit -L https://github.com/openSUSE/catatonit/releases/download/$CATATONIT_VERSION/catatonit.x86_64
+ chmod +x catatonit
+	install ${SELINUXOPT} -d -m 755 "$BASE_PATH"
+	install ${SELINUXOPT} -m 755 catatonit "$CATATONIT_PATH"
+ rm catatonit
+fi
diff --git a/libpod.conf b/libpod.conf
index d7469af68..cfdf83775 100644
--- a/libpod.conf
+++ b/libpod.conf
@@ -35,6 +35,9 @@ conmon_env_vars = [
# CGroup Manager - valid values are "systemd" and "cgroupfs"
cgroup_manager = "systemd"
+# Container init binary
+#init_path = "/usr/libexec/podman/catatonit"
+
# Directory for persistent libpod files (database, etc)
# By default, this will be configured relative to where containers/storage
# stores containers
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 06f8dcb24..29a7184c9 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -3,7 +3,6 @@ package libpod
import (
"bytes"
"encoding/json"
- "path/filepath"
"runtime"
"strings"
@@ -288,10 +287,9 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
// Get the lock
- lockPath := filepath.Join(s.runtime.lockDir, string(id))
- lock, err := storage.GetLockfile(lockPath)
+ lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for container %s", string(id))
+ return errors.Wrapf(err, "error retrieving lock for container %s", string(id))
}
ctr.lock = lock
@@ -324,10 +322,9 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
}
// Get the lock
- lockPath := filepath.Join(s.runtime.lockDir, string(id))
- lock, err := storage.GetLockfile(lockPath)
+ lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID)
if err != nil {
- return errors.Wrapf(err, "error retrieving lockfile for pod %s", string(id))
+ return errors.Wrapf(err, "error retrieving lock for pod %s", string(id))
}
pod.lock = lock
@@ -353,8 +350,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
}
// Get the lock
- lockPath := filepath.Join(s.runtime.lockDir, string(name))
- lock, err := storage.GetLockfile(lockPath)
+ lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
}
diff --git a/libpod/common_test.go b/libpod/common_test.go
index 6c7434fd2..882468a3a 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -3,20 +3,19 @@ package libpod
import (
"encoding/json"
"net"
- "path/filepath"
"reflect"
"strings"
"testing"
"time"
- "github.com/containers/storage"
+ "github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/opencontainers/runtime-tools/generate"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func getTestContainer(id, name, locksDir string) (*Container, error) {
+func getTestContainer(id, name string, manager lock.Manager) (*Container, error) {
ctr := &Container{
config: &Config{
ID: id,
@@ -90,18 +89,18 @@ func getTestContainer(id, name, locksDir string) (*Container, error) {
ctr.config.Labels["test"] = "testing"
- // Must make lockfile or container will error on being retrieved from DB
- lockPath := filepath.Join(locksDir, id)
- lock, err := storage.GetLockfile(lockPath)
+ // Allocate a lock for the container
+ lock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
ctr.lock = lock
+ ctr.config.LockID = lock.ID()
return ctr, nil
}
-func getTestPod(id, name, locksDir string) (*Pod, error) {
+func getTestPod(id, name string, manager lock.Manager) (*Pod, error) {
pod := &Pod{
config: &PodConfig{
ID: id,
@@ -115,38 +114,39 @@ func getTestPod(id, name, locksDir string) (*Pod, error) {
valid: true,
}
- lockPath := filepath.Join(locksDir, id)
- lock, err := storage.GetLockfile(lockPath)
+ // Allocate a lock for the pod
+ lock, err := manager.AllocateLock()
if err != nil {
return nil, err
}
pod.lock = lock
+ pod.config.LockID = lock.ID()
return pod, nil
}
-func getTestCtrN(n, lockPath string) (*Container, error) {
- return getTestContainer(strings.Repeat(n, 32), "test"+n, lockPath)
+func getTestCtrN(n string, manager lock.Manager) (*Container, error) {
+ return getTestContainer(strings.Repeat(n, 32), "test"+n, manager)
}
-func getTestCtr1(lockPath string) (*Container, error) {
- return getTestCtrN("1", lockPath)
+func getTestCtr1(manager lock.Manager) (*Container, error) {
+ return getTestCtrN("1", manager)
}
-func getTestCtr2(lockPath string) (*Container, error) {
- return getTestCtrN("2", lockPath)
+func getTestCtr2(manager lock.Manager) (*Container, error) {
+ return getTestCtrN("2", manager)
}
-func getTestPodN(n, lockPath string) (*Pod, error) {
- return getTestPod(strings.Repeat(n, 32), "test"+n, lockPath)
+func getTestPodN(n string, manager lock.Manager) (*Pod, error) {
+ return getTestPod(strings.Repeat(n, 32), "test"+n, manager)
}
-func getTestPod1(lockPath string) (*Pod, error) {
- return getTestPodN("1", lockPath)
+func getTestPod1(manager lock.Manager) (*Pod, error) {
+ return getTestPodN("1", manager)
}
-func getTestPod2(lockPath string) (*Pod, error) {
- return getTestPodN("2", lockPath)
+func getTestPod2(manager lock.Manager) (*Pod, error) {
+ return getTestPodN("2", manager)
}
// This horrible hack tests if containers are equal in a way that should handle
@@ -174,6 +174,8 @@ func testContainersEqual(t *testing.T, a, b *Container, allowedEmpty bool) {
assert.Equal(t, a.valid, b.valid)
+ assert.Equal(t, a.lock.ID(), b.lock.ID())
+
aConfigJSON, err := json.Marshal(a.config)
assert.NoError(t, err)
err = json.Unmarshal(aConfigJSON, aConfig)
@@ -223,6 +225,8 @@ func testPodsEqual(t *testing.T, a, b *Pod, allowedEmpty bool) {
assert.Equal(t, a.valid, b.valid)
+ assert.Equal(t, a.lock.ID(), b.lock.ID())
+
assert.EqualValues(t, a.config, b.config)
if allowedEmpty {
diff --git a/libpod/container.go b/libpod/container.go
index b4190344a..4e5088b32 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -9,6 +9,7 @@ import (
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
+ "github.com/containers/libpod/libpod/lock"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -122,7 +123,7 @@ type Container struct {
batched bool
valid bool
- lock storage.Locker
+ lock lock.Locker
runtime *Runtime
rootlessSlirpSyncR *os.File
@@ -211,6 +212,8 @@ type Config struct {
Pod string `json:"pod,omitempty"`
// Namespace the container is in
Namespace string `json:"namespace,omitempty"`
+ // ID of this container's lock
+ LockID uint32 `json:"lockID"`
// TODO consider breaking these subsections up into smaller structs
diff --git a/libpod/container_easyjson.go b/libpod/container_easyjson.go
index 299a645e1..ae0972cf6 100644
--- a/libpod/container_easyjson.go
+++ b/libpod/container_easyjson.go
@@ -1275,6 +1275,8 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Pod = string(in.String())
case "namespace":
out.Namespace = string(in.String())
+ case "lockID":
+ out.LockID = uint32(in.Uint32())
case "idMappingsOptions":
easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersStorage(in, &out.IDMappings)
case "rootfsImageID":
@@ -1778,6 +1780,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
out.String(string(in.Namespace))
}
+ {
+ const prefix string = ",\"lockID\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Uint32(uint32(in.LockID))
+ }
if true {
const prefix string = ",\"idMappingsOptions\":"
if first {
diff --git a/libpod/container_graph_test.go b/libpod/container_graph_test.go
index 25461f1f4..d1a52658d 100644
--- a/libpod/container_graph_test.go
+++ b/libpod/container_graph_test.go
@@ -1,10 +1,9 @@
package libpod
import (
- "io/ioutil"
- "os"
"testing"
+ "github.com/containers/libpod/libpod/lock"
"github.com/stretchr/testify/assert"
)
@@ -17,11 +16,12 @@ func TestBuildContainerGraphNoCtrsIsEmpty(t *testing.T) {
}
func TestBuildContainerGraphOneCtr(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1})
@@ -39,13 +39,14 @@ func TestBuildContainerGraphOneCtr(t *testing.T) {
}
func TestBuildContainerGraphTwoCtrNoEdge(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1, ctr2})
@@ -64,13 +65,14 @@ func TestBuildContainerGraphTwoCtrNoEdge(t *testing.T) {
}
func TestBuildContainerGraphTwoCtrOneEdge(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
ctr2.config.UserNsCtr = ctr1.config.ID
@@ -85,13 +87,14 @@ func TestBuildContainerGraphTwoCtrOneEdge(t *testing.T) {
}
func TestBuildContainerGraphTwoCtrCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
ctr2.config.UserNsCtr = ctr1.config.ID
ctr1.config.NetNsCtr = ctr2.config.ID
@@ -101,15 +104,16 @@ func TestBuildContainerGraphTwoCtrCycle(t *testing.T) {
}
func TestBuildContainerGraphThreeCtrNoEdges(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3})
@@ -132,15 +136,16 @@ func TestBuildContainerGraphThreeCtrNoEdges(t *testing.T) {
}
func TestBuildContainerGraphThreeContainersTwoInCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
ctr1.config.UserNsCtr = ctr2.config.ID
ctr2.config.IPCNsCtr = ctr1.config.ID
@@ -150,15 +155,16 @@ func TestBuildContainerGraphThreeContainersTwoInCycle(t *testing.T) {
}
func TestBuildContainerGraphThreeContainersCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
ctr1.config.UserNsCtr = ctr2.config.ID
ctr2.config.IPCNsCtr = ctr3.config.ID
@@ -169,15 +175,16 @@ func TestBuildContainerGraphThreeContainersCycle(t *testing.T) {
}
func TestBuildContainerGraphThreeContainersNoCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
ctr1.config.UserNsCtr = ctr2.config.ID
ctr1.config.NetNsCtr = ctr3.config.ID
@@ -194,17 +201,18 @@ func TestBuildContainerGraphThreeContainersNoCycle(t *testing.T) {
}
func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
- ctr4, err := getTestCtrN("4", tmpDir)
+ ctr4, err := getTestCtrN("4", manager)
assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4})
@@ -231,18 +239,20 @@ func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) {
}
func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
- ctr4, err := getTestCtrN("4", tmpDir)
+ ctr4, err := getTestCtrN("4", manager)
assert.NoError(t, err)
+
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr1.config.ID
@@ -251,18 +261,20 @@ func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) {
}
func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
- ctr4, err := getTestCtrN("4", tmpDir)
+ ctr4, err := getTestCtrN("4", manager)
assert.NoError(t, err)
+
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
ctr3.config.NetNsCtr = ctr4.config.ID
@@ -273,18 +285,20 @@ func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) {
}
func TestBuildContainerGraphFourContainersNoneInCycle(t *testing.T) {
- tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
- assert.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ manager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ t.Fatalf("Error setting up locks: %v", err)
+ }
- ctr1, err := getTestCtr1(tmpDir)
+ ctr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- ctr2, err := getTestCtr2(tmpDir)
+ ctr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- ctr3, err := getTestCtrN("3", tmpDir)
+ ctr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
- ctr4, err := getTestCtrN("4", tmpDir)
+ ctr4, err := getTestCtrN("4", manager)
assert.NoError(t, err)
+
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr1.config.NetNsCtr = ctr3.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 928be52ae..cc4c36bc9 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -401,7 +401,10 @@ func resetState(state *containerState) error {
return nil
}
-// Refresh refreshes the container's state after a restart
+// Refresh refreshes the container's state after a restart.
+// Refresh cannot perform any operations that would lock another container.
+// We cannot guarantee that any other container has a valid lock at the
+// time refresh is run.
func (c *Container) refresh() error {
// Don't need a full sync, but we do need to update from the database to
// pick up potentially-missing container state
@@ -447,6 +450,13 @@ func (c *Container) refresh() error {
c.state.DestinationRunDir = filepath.Join(c.state.UserNSRoot, "rundir")
}
+ // We need to pick up a new lock
+ lock, err := c.runtime.lockManager.RetrieveLock(c.config.LockID)
+ if err != nil {
+ return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
+ }
+ c.lock = lock
+
if err := c.save(); err != nil {
return errors.Wrapf(err, "error refreshing state for container %s", c.ID())
}
@@ -748,6 +758,10 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
// Internal, non-locking function to start a container
func (c *Container) start() error {
+ if c.config.Spec.Process != nil {
+ logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args)
+ }
+
if err := c.runtime.ociRuntime.startContainer(c); err != nil {
return err
}
diff --git a/libpod/image/parts.go b/libpod/image/parts.go
index 1509005e5..9adf26fb9 100644
--- a/libpod/image/parts.go
+++ b/libpod/image/parts.go
@@ -22,6 +22,18 @@ func isRegistry(name string) bool {
return strings.ContainsAny(name, ".:") || name == "localhost"
}
+// GetImageBaseName uses decompose and string splits to obtain the base
+// name of an image. It lives here so that the imageParts struct and its
+// fields can remain unexported.
+func GetImageBaseName(input string) (string, error) {
+ decomposedImage, err := decompose(input)
+ if err != nil {
+ return "", err
+ }
+ splitImageName := strings.Split(decomposedImage.name, "/")
+ return splitImageName[len(splitImageName)-1], nil
+}
+
// decompose breaks an input name into an imageParts description
func decompose(input string) (imageParts, error) {
var (
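
A usage sketch of the new helper (the image reference is illustrative): the runlabel code calls it to derive a default container name from an image reference.

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/image"
)

func main() {
	// decompose() strips the registry and tag; the base name is the
	// last path component of the repository.
	name, err := image.GetImageBaseName("quay.io/libpod/alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // "alpine"
}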
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
new file mode 100644
index 000000000..db8f20e95
--- /dev/null
+++ b/libpod/lock/in_memory_locks.go
@@ -0,0 +1,91 @@
+package lock
+
+import (
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+// Mutex holds a single mutex and whether it has been allocated.
+type Mutex struct {
+ id uint32
+ lock sync.Mutex
+ allocated bool
+}
+
+// ID retrieves the ID of the mutex
+func (m *Mutex) ID() uint32 {
+ return m.id
+}
+
+// Lock locks the mutex
+func (m *Mutex) Lock() {
+ m.lock.Lock()
+}
+
+// Unlock unlocks the mutex
+func (m *Mutex) Unlock() {
+ m.lock.Unlock()
+}
+
+// Free deallocates the mutex to allow its reuse
+func (m *Mutex) Free() error {
+ m.allocated = false
+
+ return nil
+}
+
+// InMemoryManager is a lock manager that allocates and retrieves local-only
+// locks - that is, they are not multiprocess. This lock manager is intended
+// purely for unit and integration testing and should not be used in production
+// deployments.
+type InMemoryManager struct {
+ locks []*Mutex
+ numLocks uint32
+ localLock sync.Mutex
+}
+
+// NewInMemoryManager creates a new in-memory lock manager with the given number
+// of locks.
+func NewInMemoryManager(numLocks uint32) (Manager, error) {
+ if numLocks == 0 {
+		return nil, errors.Errorf("must provide a non-zero number of locks")
+ }
+
+ manager := new(InMemoryManager)
+ manager.numLocks = numLocks
+ manager.locks = make([]*Mutex, numLocks)
+
+ var i uint32
+ for i = 0; i < numLocks; i++ {
+ lock := new(Mutex)
+ lock.id = i
+ manager.locks[i] = lock
+ }
+
+ return manager, nil
+}
+
+// AllocateLock allocates a lock from the manager.
+func (m *InMemoryManager) AllocateLock() (Locker, error) {
+ m.localLock.Lock()
+ defer m.localLock.Unlock()
+
+ for _, lock := range m.locks {
+ if !lock.allocated {
+ lock.allocated = true
+ return lock, nil
+ }
+ }
+
+ return nil, errors.Errorf("all locks have been allocated")
+}
+
+// RetrieveLock retrieves a lock from the manager.
+func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
+ if id >= m.numLocks {
+ return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
+ }
+
+ return m.locks[id], nil
+}
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
new file mode 100644
index 000000000..1f94171fe
--- /dev/null
+++ b/libpod/lock/lock.go
@@ -0,0 +1,61 @@
+package lock
+
+// Manager provides an interface for allocating multiprocess locks.
+// Locks returned by Manager MUST be multiprocess - allocating a lock in
+// process A and retrieving that lock's ID in process B must return handles for
+// the same lock, and locking the lock in A should exclude B from the lock until
+// it is unlocked in A.
+// All locks must be identified by a unique ID (retrieved with Locker's ID()
+// method). All locks with a given ID must refer to the same underlying lock,
+// and it must be possible to retrieve the lock given its ID.
+// Each ID should refer to a unique underlying lock.
+// Calls to AllocateLock() must return a unique, unallocated ID.
+// AllocateLock() must fail once all available locks have been allocated.
+// Locks are returned for reuse by calls to Free(), and can subsequently be
+// reallocated.
+type Manager interface {
+ // AllocateLock returns an unallocated lock.
+ // It is guaranteed that the same lock will not be returned again by
+ // AllocateLock until the returned lock has Free() called on it.
+ // If all available locks are allocated, AllocateLock will return an
+ // error.
+ AllocateLock() (Locker, error)
+	// RetrieveLock retrieves a lock given its ID.
+	// The underlying lock MUST be the same as any other lock with the
+	// same ID.
+ RetrieveLock(id uint32) (Locker, error)
+}
+
+// Locker is similar to sync.Locker, but provides a method for freeing the lock
+// to allow its reuse.
+// All Locker implementations must maintain mutex semantics - the lock only
+// allows one caller in the critical section at a time.
+// All locks with the same ID must refer to the same underlying lock, even
+// if they are within multiple processes.
+type Locker interface {
+ // ID retrieves the lock's ID.
+ // ID is guaranteed to uniquely identify the lock within the
+ // Manager - that is, calling RetrieveLock with this ID will return
+ // another instance of the same lock.
+ ID() uint32
+ // Lock locks the lock.
+ // This call MUST block until it successfully acquires the lock or
+ // encounters a fatal error.
+ // All errors must be handled internally, as they are not returned. For
+ // the most part, panicking should be appropriate.
+ // Some lock implementations may require that Lock() and Unlock() occur
+ // within the same goroutine (SHM locking, for example). The usual Go
+ // Lock()/defer Unlock() pattern will still work fine in these cases.
+ Lock()
+ // Unlock unlocks the lock.
+ // All errors must be handled internally, as they are not returned. For
+ // the most part, panicking should be appropriate.
+ // This includes unlocking locks which are already unlocked.
+ Unlock()
+ // Free deallocates the underlying lock, allowing its reuse by other
+ // pods and containers.
+ // The lock MUST still be usable after a Free() - some libpod instances
+ // may still retain Container structs with the old lock. This simply
+ // advises the manager that the lock may be reallocated.
+ Free() error
+}
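
A minimal sketch of the Manager/Locker lifecycle, using the in-memory manager introduced in this commit (the SHM-backed manager satisfies the same interface; error handling is abbreviated):

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	manager, err := lock.NewInMemoryManager(16)
	if err != nil {
		panic(err)
	}

	// Allocate a lock and record its ID - libpod persists the ID in
	// the container's config so it can be retrieved after a restart.
	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	id := l.ID()

	// Any holder of the ID gets a handle to the same underlying lock.
	same, err := manager.RetrieveLock(id)
	if err != nil {
		panic(err)
	}

	same.Lock()
	// ... critical section ...
	same.Unlock()

	// Return the lock for reuse once its owner is removed.
	if err := l.Free(); err != nil {
		panic(err)
	}
	fmt.Println("freed lock", id)
}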
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
new file mode 100644
index 000000000..4af58d857
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.c
@@ -0,0 +1,452 @@
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "shm_lock.h"
+
+// Compute the size of the SHM struct
+static size_t compute_shm_size(uint32_t num_bitmaps) {
+ return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t));
+}
+
+// Take the given mutex.
+// Handles exceptional conditions, including a mutex locked by a process that
+// died holding it.
+// Returns 0 on success, or positive errno on failure.
+static int take_mutex(pthread_mutex_t *mutex) {
+ int ret_code;
+
+ do {
+ ret_code = pthread_mutex_lock(mutex);
+ } while(ret_code == EAGAIN);
+
+ if (ret_code == EOWNERDEAD) {
+ // The previous owner of the mutex died while holding it
+ // Take it for ourselves
+ ret_code = pthread_mutex_consistent(mutex);
+ if (ret_code != 0) {
+ // Someone else may have gotten here first and marked the state consistent
+ // However, the mutex could also be invalid.
+ // Fail here instead of looping back to trying to lock the mutex.
+ return ret_code;
+ }
+ } else if (ret_code != 0) {
+ return ret_code;
+ }
+
+ return 0;
+}
+
+// Release the given mutex.
+// Returns 0 on success, or positive errno on failure.
+static int release_mutex(pthread_mutex_t *mutex) {
+ int ret_code;
+
+ do {
+ ret_code = pthread_mutex_unlock(mutex);
+ } while(ret_code == EAGAIN);
+
+ if (ret_code != 0) {
+ return ret_code;
+ }
+
+ return 0;
+}
+
+// Set up an SHM segment holding locks for libpod.
+// num_locks must not be 0.
+// Path is the path to the SHM segment. It must begin with a single / and
+// contain no other / characters, and be at most 255 characters including
+// terminating NULL byte.
+// Returns a valid pointer on success or NULL on error.
+// If an error occurs, negative ERRNO values will be written to error_code.
+shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
+ int shm_fd, i, j, ret_code;
+ uint32_t num_bitmaps;
+ size_t shm_size;
+ shm_struct_t *shm;
+ pthread_mutexattr_t attr;
+
+ // If error_code doesn't point to anything, we can't reasonably return errors
+ // So fail immediately
+ if (error_code == NULL) {
+ return NULL;
+ }
+
+ // We need a nonzero number of locks
+ if (num_locks == 0) {
+ *error_code = -1 * EINVAL;
+ return NULL;
+ }
+
+ if (path == NULL) {
+ *error_code = -1 * EINVAL;
+ return NULL;
+ }
+
+ // Calculate the number of bitmaps required
+ num_bitmaps = num_locks / BITMAP_SIZE;
+ if (num_locks % BITMAP_SIZE != 0) {
+ // The actual number given is not an even multiple of our bitmap size
+ // So round up
+ num_bitmaps += 1;
+ }
+
+ // Calculate size of the shm segment
+ shm_size = compute_shm_size(num_bitmaps);
+
+ // Create a new SHM segment for us
+ shm_fd = shm_open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (shm_fd < 0) {
+ *error_code = -1 * errno;
+ return NULL;
+ }
+
+ // Increase its size to what we need
+ ret_code = ftruncate(shm_fd, shm_size);
+ if (ret_code < 0) {
+ *error_code = -1 * errno;
+ goto CLEANUP_UNLINK;
+ }
+
+ // Map the shared memory in
+ shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
+ if (shm == MAP_FAILED) {
+ *error_code = -1 * errno;
+ goto CLEANUP_UNLINK;
+ }
+
+ // We have successfully mapped the memory, now initialize the region
+ shm->magic = MAGIC;
+ shm->unused = 0;
+ shm->num_locks = num_bitmaps * BITMAP_SIZE;
+ shm->num_bitmaps = num_bitmaps;
+
+ // Create an initializer for our pthread mutexes
+ ret_code = pthread_mutexattr_init(&attr);
+ if (ret_code != 0) {
+ *error_code = -1 * ret_code;
+ goto CLEANUP_UNMAP;
+ }
+
+ // Set mutexes to pshared - multiprocess-safe
+ ret_code = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+ if (ret_code != 0) {
+ *error_code = -1 * ret_code;
+ goto CLEANUP_FREEATTR;
+ }
+
+ // Set mutexes to robust - if a process dies while holding a mutex, we'll get
+ // a special error code on the next attempt to lock it.
+  // This should prevent panicking processes from leaving the state unusable.
+ ret_code = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
+ if (ret_code != 0) {
+ *error_code = -1 * ret_code;
+ goto CLEANUP_FREEATTR;
+ }
+
+ // Initialize the mutex that protects the bitmaps using the mutex attributes
+ ret_code = pthread_mutex_init(&(shm->segment_lock), &attr);
+ if (ret_code != 0) {
+ *error_code = -1 * ret_code;
+ goto CLEANUP_FREEATTR;
+ }
+
+ // Initialize all bitmaps to 0 initially
+ // And initialize all semaphores they use
+ for (i = 0; i < num_bitmaps; i++) {
+ shm->locks[i].bitmap = 0;
+ for (j = 0; j < BITMAP_SIZE; j++) {
+ // Initialize each mutex
+ ret_code = pthread_mutex_init(&(shm->locks[i].locks[j]), &attr);
+ if (ret_code != 0) {
+ *error_code = -1 * ret_code;
+ goto CLEANUP_FREEATTR;
+ }
+ }
+ }
+
+ // Close the file descriptor, we're done with it
+ // Ignore errors, it's ok if we leak a single FD and this should only run once
+ close(shm_fd);
+
+ // Destroy the pthread initializer attribute.
+ // Again, ignore errors, this will only run once and we might leak a tiny bit
+ // of memory at worst.
+ pthread_mutexattr_destroy(&attr);
+
+ return shm;
+
+ // Cleanup after an error
+ CLEANUP_FREEATTR:
+ pthread_mutexattr_destroy(&attr);
+ CLEANUP_UNMAP:
+ munmap(shm, shm_size);
+ CLEANUP_UNLINK:
+ close(shm_fd);
+ shm_unlink(path);
+ return NULL;
+}
+
+// Open an existing SHM segment holding libpod locks.
+// num_locks is the number of locks that will be configured in the SHM segment.
+// num_locks cannot be 0.
+// Path is the path to the SHM segment. It must begin with a single / and
+// contain no other / characters, and be at most 255 characters including the
+// terminating NULL byte.
+// Returns a valid pointer on success or NULL on error.
+// If an error occurs, negative ERRNO values will be written to error_code.
+shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
+ int shm_fd;
+ shm_struct_t *shm;
+ size_t shm_size;
+ uint32_t num_bitmaps;
+
+ if (error_code == NULL) {
+ return NULL;
+ }
+
+ // We need a nonzero number of locks
+ if (num_locks == 0) {
+ *error_code = -1 * EINVAL;
+ return NULL;
+ }
+
+ if (path == NULL) {
+ *error_code = -1 * EINVAL;
+ return NULL;
+ }
+
+ // Calculate the number of bitmaps required
+ num_bitmaps = num_locks / BITMAP_SIZE;
+ if (num_locks % BITMAP_SIZE != 0) {
+ num_bitmaps += 1;
+ }
+
+ // Calculate size of the shm segment
+ shm_size = compute_shm_size(num_bitmaps);
+
+ shm_fd = shm_open(path, O_RDWR, 0600);
+ if (shm_fd < 0) {
+ *error_code = -1 * errno;
+ return NULL;
+ }
+
+ // Map the shared memory in
+ shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
+ if (shm == MAP_FAILED) {
+ *error_code = -1 * errno;
+ }
+
+ // Ignore errors, it's ok if we leak a single FD since this only runs once
+ close(shm_fd);
+
+ // Check if we successfully mmap'd
+ if (shm == MAP_FAILED) {
+ return NULL;
+ }
+
+ // Need to check the SHM to see if it's actually our locks.
+ // Nothing below sets errno, so report EINVAL for a format mismatch.
+ if (shm->magic != MAGIC) {
+ *error_code = -1 * EINVAL;
+ goto CLEANUP;
+ }
+ if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
+ *error_code = -1 * EINVAL;
+ goto CLEANUP;
+ }
+
+ return shm;
+
+ CLEANUP:
+ munmap(shm, shm_size);
+ return NULL;
+}
+
+// Close an open SHM lock struct, unmapping the backing memory.
+// The given shm_struct_t will be rendered unusable as a result.
+// On success, 0 is returned. On failure, negative ERRNO values are returned.
+int32_t close_lock_shm(shm_struct_t *shm) {
+ int ret_code;
+ size_t shm_size;
+
+ // We can't unmap null...
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ shm_size = compute_shm_size(shm->num_bitmaps);
+
+ ret_code = munmap(shm, shm_size);
+
+ if (ret_code != 0) {
+ return -1 * errno;
+ }
+
+ return 0;
+}
+
+// Allocate the first available semaphore
+// Returns a non-negative integer smaller than UINT32_MAX on success,
+// or negative errno values on failure
+// On success, the returned integer is the number of the semaphore allocated
+int64_t allocate_semaphore(shm_struct_t *shm) {
+ int ret_code, i;
+ bitmap_t test_map;
+ int64_t sem_number, num_within_bitmap;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Lock the semaphore controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Loop through our bitmaps to search for one that is not full
+ for (i = 0; i < shm->num_bitmaps; i++) {
+ if (shm->locks[i].bitmap != 0xFFFFFFFF) {
+ test_map = 0x1;
+ num_within_bitmap = 0;
+ while (test_map != 0) {
+ if ((test_map & shm->locks[i].bitmap) == 0) {
+ // Compute the number of the semaphore we are allocating
+ sem_number = (BITMAP_SIZE * i) + num_within_bitmap;
+ // OR in the bitmap
+ shm->locks[i].bitmap = shm->locks[i].bitmap | test_map;
+
+ // Clear the mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Return the semaphore we've allocated
+ return sem_number;
+ }
+ test_map = test_map << 1;
+ num_within_bitmap++;
+ }
+ // We should never fall through this loop
+ // TODO maybe an assert() here to panic if we do?
+ }
+ }
+
+ // Clear the mutex
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // All bitmaps are full
+ // We have no available semaphores, report allocation failure
+ return -1 * ENOSPC;
+}
+
+// Deallocate a given semaphore
+// Returns 0 on success, negative ERRNO values on failure
+int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ bitmap_t test_map;
+ int bitmap_index, index_in_bitmap, ret_code, i;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ // Check if the lock index is valid
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ // This should never happen if the sem_index test above succeeded, but better
+ // safe than sorry
+ if (bitmap_index >= shm->num_bitmaps) {
+ return -1 * EFAULT;
+ }
+
+ test_map = 0x1 << index_in_bitmap;
+
+ // Lock the mutex controlling access to our shared memory
+ ret_code = take_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ // Check if the semaphore is allocated
+ if ((test_map & shm->locks[bitmap_index].bitmap) == 0) {
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return -1 * ENOENT;
+ }
+
+ // The semaphore is allocated, clear it
+ // Invert the bitmask we used to test to clear the bit
+ test_map = ~test_map;
+ shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map;
+
+ ret_code = release_mutex(&(shm->segment_lock));
+ if (ret_code != 0) {
+ return -1 * ret_code;
+ }
+
+ return 0;
+}
+
+// Lock a given semaphore
+// Does not check if the semaphore is allocated - this ensures that, even for
+// removed containers, we can still successfully lock to check status (and
+// subsequently realize they have been removed).
+// Returns 0 on success, negative ERRNO values on failure
+int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ return -1 * take_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+}
+
+// Unlock a given semaphore
+// Does not check if the semaphore is allocated - this ensures that, even for
+// removed containers, we can still successfully lock to check status (and
+// subsequently realize they have been removed).
+// Returns 0 on success, negative ERRNO values on failure
+int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
+ int bitmap_index, index_in_bitmap;
+
+ if (shm == NULL) {
+ return -1 * EINVAL;
+ }
+
+ if (sem_index >= shm->num_locks) {
+ return -1 * EINVAL;
+ }
+
+ bitmap_index = sem_index / BITMAP_SIZE;
+ index_in_bitmap = sem_index % BITMAP_SIZE;
+
+ return -1 * release_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
+}
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
new file mode 100644
index 000000000..87d28e5c1
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.go
@@ -0,0 +1,216 @@
+package shm
+
+// #cgo LDFLAGS: -lrt -lpthread
+// #include <stdlib.h>
+// #include "shm_lock.h"
+// const uint32_t bitmap_size_c = BITMAP_SIZE;
+import "C"
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // BitmapSize is the size of the bitmap used when managing SHM locks.
+ // An SHM lock manager's max locks will be rounded up to a multiple of
+ // this number.
+ BitmapSize uint32 = uint32(C.bitmap_size_c)
+)
+
+// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
+// segment.
+type SHMLocks struct { // nolint
+ lockStruct *C.shm_struct_t
+ maxLocks uint32
+ valid bool
+}
+
+// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
+// semaphores, and returns a struct that can be used to operate on those locks.
+// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
+// size used by the underlying implementation.
+func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ if numLocks == 0 {
+ return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ }
+
+ locks := new(SHMLocks)
+
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ var errCode C.int
+ lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
+ if lockStruct == nil {
+ // We got a null pointer, so something errored
+ return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path)
+ }
+
+ locks.lockStruct = lockStruct
+ locks.maxLocks = uint32(lockStruct.num_locks)
+ locks.valid = true
+
+ logrus.Debugf("Initialized SHM lock manager at path %s", path)
+
+ return locks, nil
+}
+
+// OpenSHMLock opens an existing shared-memory segment holding a given number of
+// POSIX semaphores. numLocks must match the number of locks the shared memory
+// segment was created with.
+func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
+ if numLocks == 0 {
+ return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
+ }
+
+ locks := new(SHMLocks)
+
+ cPath := C.CString(path)
+ defer C.free(unsafe.Pointer(cPath))
+
+ var errCode C.int
+ lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
+ if lockStruct == nil {
+ // We got a null pointer, so something errored
+ return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path)
+ }
+
+ locks.lockStruct = lockStruct
+ locks.maxLocks = uint32(lockStruct.num_locks)
+ locks.valid = true
+
+ return locks, nil
+}
+
+// GetMaxLocks returns the maximum number of locks in the SHM
+func (locks *SHMLocks) GetMaxLocks() uint32 {
+ return locks.maxLocks
+}
+
+// Close closes an existing shared-memory segment.
+// The segment will be rendered unusable after closing.
+// WARNING: If you Close() while there are still locks locked, these locks may
+// fail to release, causing a program freeze.
+// Close() is only intended to be used while testing the locks.
+func (locks *SHMLocks) Close() error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ locks.valid = false
+
+ retCode := C.close_lock_shm(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
+// by a container or pod.
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
+func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
+ if !locks.valid {
+ return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ // This returns an int64, so we have the full uint32 range available for
+ // semaphore indexes, and can still return error codes.
+ retCode := C.allocate_semaphore(locks.lockStruct)
+ if retCode < 0 {
+ // Negative errno returned
+ return 0, syscall.Errno(-1 * retCode)
+ }
+
+ return uint32(retCode), nil
+}
+
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
+func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) LockSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ }
+
+ // For pthread mutexes, we have to guarantee lock and unlock happen in
+ // the same thread.
+ runtime.LockOSThread()
+
+ retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned; the lock was not taken, so unpin the thread
+ runtime.UnlockOSThread()
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
+func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
+ if !locks.valid {
+ return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+ }
+
+ if sem >= locks.maxLocks {
+ return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
+ }
+
+ retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
+ if retCode < 0 {
+ // Negative errno returned
+ return syscall.Errno(-1 * retCode)
+ }
+
+ // For pthread mutexes, we have to guarantee lock and unlock happen in
+ // the same thread.
+ // OK if we take multiple locks - UnlockOSThread() won't actually unlock
+ // until the number of calls equals the number of calls to
+ // LockOSThread()
+ runtime.UnlockOSThread()
+
+ return nil
+}
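
End to end, the bindings expose a small allocate/lock/unlock/free lifecycle. A hedged usage sketch follows; it is not part of this patch, and the segment path and lock count are invented for illustration:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock/shm"
)

func main() {
	// Create a fresh segment; the requested count is rounded up to a
	// multiple of the bitmap size.
	locks, err := shm.CreateSHMLock("/libpod_example", 100)
	if err != nil {
		panic(err)
	}
	// Close() is intended for testing/teardown only; see its warning.
	defer locks.Close()

	sem, err := locks.AllocateSemaphore()
	if err != nil {
		panic(err)
	}

	// Lock and unlock must happen on the same goroutine: the binding pins
	// the OS thread because pthread mutexes are thread-affine.
	if err := locks.LockSemaphore(sem); err != nil {
		panic(err)
	}
	if err := locks.UnlockSemaphore(sem); err != nil {
		panic(err)
	}

	if err := locks.DeallocateSemaphore(sem); err != nil {
		panic(err)
	}
	fmt.Println("cycled semaphore", sem)
}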
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
new file mode 100644
index 000000000..8e7e23fb7
--- /dev/null
+++ b/libpod/lock/shm/shm_lock.h
@@ -0,0 +1,46 @@
+#ifndef shm_locks_h_
+#define shm_locks_h_
+
+#include <pthread.h>
+#include <stdint.h>
+
+// Magic number to ensure we open the right SHM segment
+#define MAGIC 0x87D1
+
+// Type for our bitmaps
+typedef uint32_t bitmap_t;
+
+// bitmap size
+#define BITMAP_SIZE (sizeof(bitmap_t) * 8)
+
+// Struct to hold a single bitmap and associated locks
+typedef struct lock_group {
+ bitmap_t bitmap;
+ pthread_mutex_t locks[BITMAP_SIZE];
+} lock_group_t;
+
+// Struct to hold our SHM locks.
+// Unused is required to be 0 in the current implementation. If we ever make
+// changes to this structure in the future, this will be repurposed as a version
+// field.
+typedef struct shm_struct {
+ uint16_t magic;
+ uint16_t unused;
+ pthread_mutex_t segment_lock;
+ uint32_t num_bitmaps;
+ uint32_t num_locks;
+ lock_group_t locks[];
+} shm_struct_t;
+
+static size_t compute_shm_size(uint32_t num_bitmaps);
+static int take_mutex(pthread_mutex_t *mutex);
+static int release_mutex(pthread_mutex_t *mutex);
+shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code);
+shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
+int32_t close_lock_shm(shm_struct_t *shm);
+int64_t allocate_semaphore(shm_struct_t *shm);
+int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
+
+#endif
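
For a sense of scale: with the 32-bit bitmap_t above, BITMAP_SIZE is 32, so the default of 2048 locks needs 2048 / 32 = 64 lock_group_t entries. Assuming glibc's 40-byte pthread_mutex_t on x86-64 (a platform detail this header does not guarantee), each group is roughly 4 + 32 * 40 = 1284 bytes before padding, so the whole segment comes to about 80 KiB plus the header - small enough to map into every process cheaply.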
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
new file mode 100644
index 000000000..594eb5d8e
--- /dev/null
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -0,0 +1,278 @@
+package shm
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// All tests here are in the same process, which somewhat limits their utility
+// The big intent of this package is multiprocess locking, which is really hard
+// to test without actually having multiple processes...
+// We can at least verify that the locks work within the local process.
+
+var (
+ // 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
+ numLocks = 4 * BitmapSize
+)
+
+const lockPath = "/libpod_test"
+
+// We need a test main to ensure that the SHM is created before the tests run
+func TestMain(m *testing.M) {
+ shmLock, err := CreateSHMLock(lockPath, numLocks)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
+ os.Exit(1)
+ }
+
+ // Close the SHM - every subsequent test will reopen
+ if err := shmLock.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
+ os.Exit(1)
+ }
+
+ exitCode := m.Run()
+
+ // We need to remove the SHM segment to clean up after ourselves
+ os.RemoveAll("/dev/shm" + lockPath)
+
+ os.Exit(exitCode)
+}
+
+func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
+ locks, err := OpenSHMLock(lockPath, numLocks)
+ if err != nil {
+ t.Fatalf("Error opening locks: %v", err)
+ }
+ defer func() {
+ // Deallocate all locks
+ // Ignore ENOENT (lock is not allocated)
+ var i uint32
+ for i = 0; i < numLocks; i++ {
+ if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
+ t.Fatalf("Error deallocating semaphore %d: %v", i, err)
+ }
+ }
+
+ if err := locks.Close(); err != nil {
+ t.Fatalf("Error closing locks: %v", err)
+ }
+ }()
+
+ success := t.Run("locks", func(t *testing.T) {
+ testFunc(t, locks)
+ })
+ if !success {
+ t.Fail()
+ }
+}
+
+// Test that creating an SHM with a bad size rounds up to a good size
+func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
+ // Odd number, not a power of 2, should never be a word size on a system
+ lock, err := CreateSHMLock("/test1", 7)
+ assert.NoError(t, err)
+
+ assert.Equal(t, lock.GetMaxLocks(), BitmapSize)
+
+ if err := lock.Close(); err != nil {
+ t.Fatalf("Error closing locks: %v", err)
+ }
+}
+
+// Test that creating an SHM with 0 size fails
+func TestCreateNewSHMZeroSize(t *testing.T) {
+ _, err := CreateSHMLock("/test2", 0)
+ assert.Error(t, err)
+}
+
+// Test that deallocating an unallocated lock errors
+func TestDeallocateUnallocatedLockErrors(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.DeallocateSemaphore(0)
+ assert.Error(t, err)
+ })
+}
+
+// Test that unlocking an unlocked lock fails
+func TestUnlockingUnlockedLockFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.UnlockSemaphore(0)
+ assert.Error(t, err)
+ })
+}
+
+// Test that locking and double-unlocking fails
+func TestDoubleUnlockFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(0)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(0)
+ assert.Error(t, err)
+ })
+}
+
+// Test allocating - lock - unlock - deallocate cycle, single lock
+func TestLockLifecycleSingleLock(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sem, err := locks.AllocateSemaphore()
+ require.NoError(t, err)
+
+ err = locks.LockSemaphore(sem)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(sem)
+ assert.NoError(t, err)
+
+ err = locks.DeallocateSemaphore(sem)
+ assert.NoError(t, err)
+ })
+}
+
+// Test allocate two locks returns different locks
+func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sem1, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ sem2, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ assert.NotEqual(t, sem1, sem2)
+ })
+}
+
+// Test allocate all locks successful and all are unique
+func TestAllocateAllLocksSucceeds(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ sems := make(map[uint32]bool)
+ var i uint32
+ for i = 0; i < numLocks; i++ {
+ sem, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+
+ // Ensure the allocated semaphore is unique
+ _, ok := sems[sem]
+ assert.False(t, ok)
+
+ sems[sem] = true
+ }
+ })
+}
+
+// Test allocating more than the given max fails
+func TestAllocateTooManyLocksFails(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate all locks
+ var i uint32
+ for i = 0; i < numLocks; i++ {
+ _, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ }
+
+ // Try and allocate one more
+ _, err := locks.AllocateSemaphore()
+ assert.Error(t, err)
+ })
+}
+
+// Test allocating max locks, deallocating one, and then allocating again succeeds
+func TestAllocateDeallocateCycle(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // Allocate all locks
+ var i uint32
+ for i = 0; i < numLocks; i++ {
+ _, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ }
+
+ // Now loop through again, deallocating and reallocating.
+ // Each time we free 1 semaphore, allocate again, and make sure
+ // we get the same semaphore back.
+ var j uint32
+ for j = 0; j < numLocks; j++ {
+ err := locks.DeallocateSemaphore(j)
+ assert.NoError(t, err)
+
+ newSem, err := locks.AllocateSemaphore()
+ assert.NoError(t, err)
+ assert.Equal(t, j, newSem)
+ }
+ })
+}
+
+// Test that locks actually lock
+func TestLockSemaphoreActuallyLocks(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ // This entire test is very ugly - lots of sleeps to try and get
+ // things to occur in the right order.
+ // It also doesn't even exercise the multiprocess nature of the
+ // locks.
+
+ // Get the current time
+ startTime := time.Now()
+
+ // Start a goroutine to take the lock and then release it after
+ // a second.
+ go func() {
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ time.Sleep(1 * time.Second)
+
+ err = locks.UnlockSemaphore(0)
+ assert.NoError(t, err)
+ }()
+
+ // Sleep for a quarter of a second to give the goroutine time
+ // to kick off and grab the lock
+ time.Sleep(250 * time.Millisecond)
+
+ // Take the lock
+ err := locks.LockSemaphore(0)
+ assert.NoError(t, err)
+
+ // Get the current time
+ endTime := time.Now()
+
+ // Verify that at least 1 second has passed since start
+ duration := endTime.Sub(startTime)
+ assert.True(t, duration.Seconds() > 1.0)
+ })
+}
+
+// Test that locking and unlocking two semaphores succeeds
+// Ensures that runtime.LockOSThread() is doing its job
+func TestLockAndUnlockTwoSemaphore(t *testing.T) {
+ runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+ err := locks.LockSemaphore(5)
+ assert.NoError(t, err)
+
+ err = locks.LockSemaphore(6)
+ assert.NoError(t, err)
+
+ err = locks.UnlockSemaphore(6)
+ assert.NoError(t, err)
+
+ // Now yield scheduling
+ // To try and get us on another OS thread
+ runtime.Gosched()
+
+ // And unlock the last semaphore
+ // If we are in a different OS thread, this should fail.
+ // However, the still-held runtime.LockOSThread() pin should guarantee we are not
+ err = locks.UnlockSemaphore(5)
+ assert.NoError(t, err)
+ })
+}
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
new file mode 100644
index 000000000..94dfd7dd7
--- /dev/null
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -0,0 +1,102 @@
+// +build linux
+
+package lock
+
+import (
+ "syscall"
+
+ "github.com/containers/libpod/libpod/lock/shm"
+ "github.com/pkg/errors"
+)
+
+// SHMLockManager manages shared memory locks.
+type SHMLockManager struct {
+ locks *shm.SHMLocks
+}
+
+// NewSHMLockManager makes a new SHMLockManager with the given number of locks.
+// Due to the underlying implementation, the exact number of locks created may
+// be greater than the number given here.
+func NewSHMLockManager(path string, numLocks uint32) (Manager, error) {
+ locks, err := shm.CreateSHMLock(path, numLocks)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(SHMLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// OpenSHMLockManager opens an existing SHMLockManager with the given number of
+// locks.
+func OpenSHMLockManager(path string, numLocks uint32) (Manager, error) {
+ locks, err := shm.OpenSHMLock(path, numLocks)
+ if err != nil {
+ return nil, err
+ }
+
+ manager := new(SHMLockManager)
+ manager.locks = locks
+
+ return manager, nil
+}
+
+// AllocateLock allocates a new lock from the manager.
+func (m *SHMLockManager) AllocateLock() (Locker, error) {
+ semIndex, err := m.locks.AllocateSemaphore()
+ if err != nil {
+ return nil, err
+ }
+
+ lock := new(SHMLock)
+ lock.lockID = semIndex
+ lock.manager = m
+
+ return lock, nil
+}
+
+// RetrieveLock retrieves a lock from the manager given its ID.
+func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
+ lock := new(SHMLock)
+ lock.lockID = id
+ lock.manager = m
+
+ if id >= m.locks.GetMaxLocks() {
+ return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - largest valid lock ID is %d",
+ id, m.locks.GetMaxLocks()-1)
+ }
+
+ return lock, nil
+}
+
+// SHMLock is an individual shared memory lock.
+type SHMLock struct {
+ lockID uint32
+ manager *SHMLockManager
+}
+
+// ID returns the ID of the lock.
+func (l *SHMLock) ID() uint32 {
+ return l.lockID
+}
+
+// Lock acquires the lock.
+func (l *SHMLock) Lock() {
+ if err := l.manager.locks.LockSemaphore(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Unlock releases the lock.
+func (l *SHMLock) Unlock() {
+ if err := l.manager.locks.UnlockSemaphore(l.lockID); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Free releases the lock, allowing it to be reused.
+func (l *SHMLock) Free() error {
+ return l.manager.locks.DeallocateSemaphore(l.lockID)
+}
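
The manager is the surface the rest of libpod consumes: allocate a Locker when a container or pod is created, persist its ID, then re-attach with RetrieveLock after a restart. A hedged sketch of that flow (the path and lock count are invented for illustration):

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// First boot: create the segment and allocate a lock.
	manager, err := lock.NewSHMLockManager("/libpod_example_mgr", 128)
	if err != nil {
		panic(err)
	}
	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	savedID := l.ID() // persisted, like LockID in the container/pod config

	// After a restart: reopen the segment and re-attach by ID.
	manager2, err := lock.OpenSHMLockManager("/libpod_example_mgr", 128)
	if err != nil {
		panic(err)
	}
	l2, err := manager2.RetrieveLock(savedID)
	if err != nil {
		panic(err)
	}
	l2.Lock()
	defer l2.Unlock()
	fmt.Println("re-attached to lock", l2.ID())
}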
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
new file mode 100644
index 000000000..a1340fcd1
--- /dev/null
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -0,0 +1,29 @@
+// +build !linux
+
+package lock
+
+import "fmt"
+
+// SHMLockManager is a shared memory lock manager.
+// It is not supported on non-Linux platforms.
+type SHMLockManager struct{}
+
+// NewSHMLockManager is not supported on this platform
+func NewSHMLockManager(path string, numLocks uint32) (Manager, error) {
+ return nil, fmt.Errorf("not supported")
+}
+
+// OpenSHMLockManager is not supported on this platform
+func OpenSHMLockManager(path string, numLocks uint32) (Manager, error) {
+ return nil, fmt.Errorf("not supported")
+}
+
+// AllocateLock is not supported on this platform
+func (m *SHMLockManager) AllocateLock() (Locker, error) {
+ return nil, fmt.Errorf("not supported")
+}
+
+// RetrieveLock is not supported on this platform
+func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
+ return nil, fmt.Errorf("not supported")
+}
diff --git a/libpod/pod.go b/libpod/pod.go
index 07f41f5c6..4ce697402 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -3,7 +3,7 @@ package libpod
import (
"time"
- "github.com/containers/storage"
+ "github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/pkg/errors"
)
@@ -26,7 +26,7 @@ type Pod struct {
valid bool
runtime *Runtime
- lock storage.Locker
+ lock lock.Locker
}
// PodConfig represents a pod's static configuration
@@ -60,6 +60,9 @@ type PodConfig struct {
// Time pod was created
CreatedTime time.Time `json:"created"`
+
+ // ID of the pod's lock
+ LockID uint32 `json:"lockID"`
}
// podState represents a pod's state
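
With this field in place, a pod's persisted configuration carries its lock ID alongside the existing metadata, so a restarted runtime can re-attach to the same semaphore. Serialized, the addition is just one more key in the JSON config, e.g. "lockID": 42 (value invented for illustration); the easyjson changes below wire up its encoding and decoding.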
diff --git a/libpod/pod_easyjson.go b/libpod/pod_easyjson.go
index 8ea9a5e72..71862dad0 100644
--- a/libpod/pod_easyjson.go
+++ b/libpod/pod_easyjson.go
@@ -501,6 +501,8 @@ func easyjsonBe091417DecodeGithubComContainersLibpodLibpod4(in *jlexer.Lexer, ou
if data := in.Raw(); in.Ok() {
in.AddError((out.CreatedTime).UnmarshalJSON(data))
}
+ case "lockID":
+ out.LockID = uint32(in.Uint32())
default:
in.SkipRecursive()
}
@@ -675,6 +677,16 @@ func easyjsonBe091417EncodeGithubComContainersLibpodLibpod4(out *jwriter.Writer,
}
out.Raw((in.CreatedTime).MarshalJSON())
}
+ {
+ const prefix string = ",\"lockID\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Uint32(uint32(in.LockID))
+ }
out.RawByte('}')
}
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 39a25c004..0f1f115e8 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -7,14 +7,13 @@ import (
"strings"
"time"
- "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Creates a new, empty pod
-func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
+func newPod(runtime *Runtime) (*Pod, error) {
pod := new(Pod)
pod.config = new(PodConfig)
pod.config.ID = stringid.GenerateNonCryptoID()
@@ -24,15 +23,6 @@ func newPod(lockDir string, runtime *Runtime) (*Pod, error) {
pod.state = new(podState)
pod.runtime = runtime
- // Path our lock file will reside at
- lockPath := filepath.Join(lockDir, pod.config.ID)
- // Grab a lockfile at the given path
- lock, err := storage.GetLockfile(lockPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating lockfile for new pod")
- }
- pod.lock = lock
-
return pod, nil
}
@@ -55,6 +45,8 @@ func (p *Pod) save() error {
}
// Refresh a pod's state after restart
+// This cannot lock any other pod, but may lock individual containers, as those
+// will already have been refreshed by the time pod refresh runs.
func (p *Pod) refresh() error {
// Need to to an update from the DB to pull potentially-missing state
if err := p.runtime.state.UpdatePod(p); err != nil {
@@ -65,6 +57,13 @@ func (p *Pod) refresh() error {
return ErrPodRemoved
}
+ // Retrieve the pod's lock
+ lock, err := p.runtime.lockManager.RetrieveLock(p.config.LockID)
+ if err != nil {
+ return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID())
+ }
+ p.lock = lock
+
// We need to recreate the pod's cgroup
if p.config.UsePodCgroup {
switch p.runtime.config.CgroupManager {
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 2dfebf565..ab8d02a4f 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "fmt"
"io/ioutil"
"os"
"os/exec"
@@ -11,6 +12,7 @@ import (
is "github.com/containers/image/storage"
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/libpod/lock"
"github.com/containers/libpod/pkg/firewall"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
@@ -61,6 +63,14 @@ const (
DefaultInfraImage = "k8s.gcr.io/pause:3.1"
// DefaultInfraCommand to be run in an infra container
DefaultInfraCommand = "/pause"
+
+ // DefaultInitPath is the default path to the container-init binary
+ DefaultInitPath = "/usr/libexec/podman/catatonit"
+
+ // DefaultSHMLockPath is the default path for SHM locks
+ DefaultSHMLockPath = "/libpod_lock"
+ // DefaultRootlessSHMLockPath is the default path for rootless SHM locks
+ DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
)
// A RuntimeOption is a functional option which alters the Runtime created by
@@ -75,7 +85,6 @@ type Runtime struct {
storageService *storageService
imageContext *types.SystemContext
ociRuntime *OCIRuntime
- lockDir string
netPlugin ocicni.CNIPlugin
ociRuntimePath string
conmonPath string
@@ -83,6 +92,7 @@ type Runtime struct {
lock sync.RWMutex
imageRuntime *image.Runtime
firewallBackend firewall.FirewallBackend
+ lockManager lock.Manager
configuredFrom *runtimeConfiguredFrom
}
@@ -122,6 +132,8 @@ type RuntimeConfig struct {
// CGroupManager is the CGroup Manager to use
// Valid values are "cgroupfs" and "systemd"
CgroupManager string `toml:"cgroup_manager"`
+ // InitPath is the path to the container-init binary.
+ InitPath string `toml:"init_path"`
// StaticDir is the path to a persistent directory to store container
// files
StaticDir string `toml:"static_dir"`
@@ -160,6 +172,7 @@ type RuntimeConfig struct {
// and all containers and pods will be visible.
// The default namespace is "".
Namespace string `toml:"namespace,omitempty"`
+
// InfraImage is the image a pod infra container will use to manage namespaces
InfraImage string `toml:"infra_image"`
// InfraCommand is the command run to start up a pod infra container
@@ -174,6 +187,10 @@ type RuntimeConfig struct {
EnablePortReservation bool `toml:"enable_port_reservation"`
// EnableLabeling indicates wether libpod will support container labeling
EnableLabeling bool `toml:"label"`
+
+ // NumLocks is the number of locks to make available for containers and
+ // pods.
+ NumLocks uint32 `toml:"num_locks,omitempty"`
}
// runtimeConfiguredFrom is a struct used during early runtime init to help
@@ -217,6 +234,7 @@ var (
ConmonEnvVars: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
+ InitPath: DefaultInitPath,
CgroupManager: SystemdCgroupsManager,
StaticDir: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "libpod"),
TmpDir: "",
@@ -228,6 +246,7 @@ var (
InfraImage: DefaultInfraImage,
EnablePortReservation: true,
EnableLabeling: true,
+ NumLocks: 2048,
}
)
@@ -610,17 +629,6 @@ func makeRuntime(runtime *Runtime) (err error) {
}
runtime.ociRuntime = ociRuntime
- // Make a directory to hold container lockfiles
- lockDir := filepath.Join(runtime.config.TmpDir, "lock")
- if err := os.MkdirAll(lockDir, 0755); err != nil {
- // The directory is allowed to exist
- if !os.IsExist(err) {
- return errors.Wrapf(err, "error creating runtime lockfiles directory %s",
- lockDir)
- }
- }
- runtime.lockDir = lockDir
-
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil {
// The directory is allowed to exist
@@ -665,6 +673,7 @@ func makeRuntime(runtime *Runtime) (err error) {
// and use it to lock important operations
aliveLock.Lock()
locked := true
+ doRefresh := false
defer func() {
if locked {
aliveLock.Unlock()
@@ -677,22 +686,52 @@ func makeRuntime(runtime *Runtime) (err error) {
// empty state only creates a single file
// As such, it's not really a performance concern
if os.IsNotExist(err) {
- if os.Geteuid() != 0 {
- aliveLock.Unlock()
- locked = false
- if err2 := runtime.refreshRootless(); err2 != nil {
- return err2
- }
- } else {
- if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
- return err2
- }
- }
+ doRefresh = true
} else {
return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
}
}
+ // Set up the lock manager
+ var manager lock.Manager
+ lockPath := DefaultSHMLockPath
+ if rootless.IsRootless() {
+ lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
+ }
+ if doRefresh {
+ // If SHM locks already exist, delete them and reinitialize
+ if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error deleting existing libpod SHM segment %s", lockPath)
+ }
+
+ manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return err
+ }
+ } else {
+ manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
+ if err != nil {
+ return err
+ }
+ }
+ runtime.lockManager = manager
+
+ // If we need to refresh the state, do it now - things are guaranteed to
+ // be set up by now.
+ if doRefresh {
+ if os.Geteuid() != 0 {
+ aliveLock.Unlock()
+ locked = false
+ if err2 := runtime.refreshRootless(); err2 != nil {
+ return err2
+ }
+ } else {
+ if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
+ return err2
+ }
+ }
+ }
+
// Mark the runtime as valid - ready to be used, cannot be modified
// further
runtime.valid = true
@@ -788,19 +827,22 @@ func (r *Runtime) refresh(alivePath string) error {
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
+ // No locks are taken during pod and container refresh.
+ // Furthermore, the pod and container refresh() functions are not
+ // allowed to take locks themselves.
+ // We cannot assume that any pod or container has a valid lock until
+ // after this function has returned.
+ // The runtime alive lock should suffice to provide mutual exclusion
+ // until this has run.
for _, ctr := range ctrs {
- ctr.lock.Lock()
if err := ctr.refresh(); err != nil {
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
- ctr.lock.Unlock()
}
for _, pod := range pods {
- pod.lock.Lock()
if err := pod.refresh(); err != nil {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
- pod.lock.Unlock()
}
// Create a file indicating the runtime is alive and ready
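
Worth making explicit: shm_open names such as /libpod_lock surface under /dev/shm on Linux, which is why the refresh path above removes filepath.Join("/dev/shm", lockPath). A small sketch of the naming scheme mirroring the code above (the UID is invented for illustration):

package main

import (
	"fmt"
	"path/filepath"
)

// shmLockPath reproduces the segment-name selection in makeRuntime.
func shmLockPath(rootless bool, uid int) string {
	if rootless {
		return fmt.Sprintf("%s_%d", "/libpod_rootless_lock", uid)
	}
	return "/libpod_lock"
}

func main() {
	p := shmLockPath(true, 1000)
	// shm_open names map to files under /dev/shm on Linux.
	fmt.Println(p, "->", filepath.Join("/dev/shm", p))
	// Prints: /libpod_rootless_lock_1000 -> /dev/shm/libpod_rootless_lock_1000
}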
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index a9095c6d1..e6f2c962f 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -9,7 +9,6 @@ import (
"time"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -61,15 +60,6 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.state.BindMounts = make(map[string]string)
- // Path our lock file will reside at
- lockPath := filepath.Join(r.lockDir, ctr.config.ID)
- // Grab a lockfile at the given path
- lock, err := storage.GetLockfile(lockPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating lockfile for new container")
- }
- ctr.lock = lock
-
ctr.config.StopTimeout = CtrRemoveTimeout
// Set namespace based on current runtime namespace
@@ -85,6 +75,19 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
}
+ // Allocate a lock for the container
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error allocating lock for new container")
+ }
+ ctr.lock = lock
+ ctr.config.LockID = ctr.lock.ID()
+ logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
+
+ ctr.runtime = r
+
ctr.valid = true
ctr.state.State = ContainerStateConfigured
@@ -379,6 +382,15 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
}
+ // Deallocate the container's lock
+ if err := c.lock.Free(); err != nil {
+ if cleanupErr == nil {
+ cleanupErr = err
+ } else {
+ logrus.Errorf("Error freeing lock for container %s: %v", c.ID(), err)
+ }
+ }
+
return cleanupErr
}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 3d6fad52f..c6d497c0c 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -23,7 +23,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
return nil, ErrRuntimeStopped
}
- pod, err := newPod(r.lockDir, r)
+ pod, err := newPod(r)
if err != nil {
return nil, errors.Wrapf(err, "error creating pod")
}
@@ -48,6 +48,14 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (*Pod,
pod.config.Name = name
}
+ // Allocate a lock for the pod
+ lock, err := r.lockManager.AllocateLock()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error allocating lock for new pod")
+ }
+ pod.lock = lock
+ pod.config.LockID = pod.lock.ID()
+
pod.valid = true
// Check CGroup parent sanity, and set it if it was not set
@@ -239,6 +247,11 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
return err
}
}
+
+ // Free the container's lock
+ if err := ctr.lock.Free(); err != nil {
+ return err
+ }
}
// Remove containers from the state
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 5cc0938f0..0727cfedf 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -8,7 +8,6 @@ import (
"path/filepath"
"strings"
- "github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
@@ -68,14 +67,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
}
volume.config.MountPoint = fullVolPath
- // Path our lock file will reside at
- lockPath := filepath.Join(r.lockDir, volume.config.Name)
- // Grab a lockfile at the given path
- lock, err := storage.GetLockfile(lockPath)
+ lock, err := r.lockManager.AllocateLock()
if err != nil {
- return nil, errors.Wrapf(err, "error creating lockfile for new volume")
+ return nil, errors.Wrapf(err, "error allocating lock for new volume")
}
volume.lock = lock
+ volume.config.LockID = volume.lock.ID()
volume.valid = true
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 708ce7d4e..ee4201b1c 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -8,16 +8,17 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/lock"
"github.com/containers/storage"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-// Returns state, tmp directory containing all state files, locks directory
-// (subdirectory of tmp dir), and error
+// Returns state, tmp directory containing all state files, lock manager, and
+// error.
// Closing the state and removing the given tmp directory should be sufficient
-// to clean up
-type emptyStateFunc func() (State, string, string, error)
+// to clean up.
+type emptyStateFunc func() (State, string, lock.Manager, error)
const (
tmpDirPrefix = "libpod_state_test_"
@@ -31,10 +32,10 @@ var (
)
// Get an empty BoltDB state for use in tests
-func getEmptyBoltState() (s State, p string, p2 string, err error) {
+func getEmptyBoltState() (s State, p string, m lock.Manager, err error) {
tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
if err != nil {
- return nil, "", "", err
+ return nil, "", nil, err
}
defer func() {
if err != nil {
@@ -43,30 +44,30 @@ func getEmptyBoltState() (s State, p string, p2 string, err error) {
}()
dbPath := filepath.Join(tmpDir, "db.sql")
- lockDir := filepath.Join(tmpDir, "locks")
- if err := os.Mkdir(lockDir, 0755); err != nil {
- return nil, "", "", err
+ lockManager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ return nil, "", nil, err
}
runtime := new(Runtime)
runtime.config = new(RuntimeConfig)
runtime.config.StorageConfig = storage.StoreOptions{}
- runtime.lockDir = lockDir
+ runtime.lockManager = lockManager
state, err := NewBoltState(dbPath, runtime)
if err != nil {
- return nil, "", "", err
+ return nil, "", nil, err
}
- return state, tmpDir, lockDir, nil
+ return state, tmpDir, lockManager, nil
}
// Get an empty in-memory state for use in tests
-func getEmptyInMemoryState() (s State, p string, p2 string, err error) {
+func getEmptyInMemoryState() (s State, p string, m lock.Manager, err error) {
tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
if err != nil {
- return nil, "", "", err
+ return nil, "", nil, err
}
defer func() {
if err != nil {
@@ -76,17 +77,20 @@ func getEmptyInMemoryState() (s State, p string, p2 string, err error) {
state, err := NewInMemoryState()
if err != nil {
- return nil, "", "", err
+ return nil, "", nil, err
+ }
+
+ lockManager, err := lock.NewInMemoryManager(16)
+ if err != nil {
+ return nil, "", nil, err
}
- // Don't need a separate locks dir as InMemoryState stores nothing on
- // disk
- return state, tmpDir, tmpDir, nil
+ return state, tmpDir, lockManager, nil
}
-func runForAllStates(t *testing.T, testFunc func(*testing.T, State, string)) {
+func runForAllStates(t *testing.T, testFunc func(*testing.T, State, lock.Manager)) {
for stateName, stateFunc := range testedStates {
- state, path, lockPath, err := stateFunc()
+ state, path, manager, err := stateFunc()
if err != nil {
t.Fatalf("Error initializing state %s: %v", stateName, err)
}
@@ -94,7 +98,7 @@ func runForAllStates(t *testing.T, testFunc func(*testing.T, State, string)) {
defer state.Close()
success := t.Run(stateName, func(t *testing.T) {
- testFunc(t, state, lockPath)
+ testFunc(t, state, manager)
})
if !success {
t.Fail()
@@ -103,8 +107,8 @@ func runForAllStates(t *testing.T, testFunc func(*testing.T, State, string)) {
}
func TestAddAndGetContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -118,10 +122,10 @@ func TestAddAndGetContainer(t *testing.T) {
}
func TestAddAndGetContainerFromMultiple(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr1)
@@ -138,8 +142,8 @@ func TestAddAndGetContainerFromMultiple(t *testing.T) {
}
func TestGetContainerPodSameIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -151,17 +155,17 @@ func TestGetContainerPodSameIDFails(t *testing.T) {
}
func TestAddInvalidContainerFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.AddContainer(&Container{config: &Config{ID: "1234"}})
assert.Error(t, err)
})
}
func TestAddDuplicateCtrIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestContainer(testCtr1.ID(), "test2", lockPath)
+ testCtr2, err := getTestContainer(testCtr1.ID(), "test2", manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr1)
@@ -177,10 +181,10 @@ func TestAddDuplicateCtrIDFails(t *testing.T) {
}
func TestAddDuplicateCtrNameFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestContainer(strings.Repeat("2", 32), testCtr1.Name(), lockPath)
+ testCtr2, err := getTestContainer(strings.Repeat("2", 32), testCtr1.Name(), manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr1)
@@ -196,10 +200,10 @@ func TestAddDuplicateCtrNameFails(t *testing.T) {
}
func TestAddCtrPodDupIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestContainer(testPod.ID(), "testCtr", lockPath)
+ testCtr, err := getTestContainer(testPod.ID(), "testCtr", manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -215,10 +219,10 @@ func TestAddCtrPodDupIDFails(t *testing.T) {
}
func TestAddCtrPodDupNameFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), lockPath)
+ testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -234,11 +238,11 @@ func TestAddCtrPodDupNameFails(t *testing.T) {
}
func TestAddCtrInPodFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -256,16 +260,16 @@ func TestAddCtrInPodFails(t *testing.T) {
}
func TestAddCtrDepInPodFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.ID()
@@ -288,10 +292,10 @@ func TestAddCtrDepInPodFails(t *testing.T) {
}
func TestAddCtrDepInSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -312,10 +316,10 @@ func TestAddCtrDepInSameNamespaceSucceeds(t *testing.T) {
}
func TestAddCtrDepInDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -338,8 +342,8 @@ func TestAddCtrDepInDifferentNamespaceFails(t *testing.T) {
}
func TestAddCtrSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -357,8 +361,8 @@ func TestAddCtrSameNamespaceSucceeds(t *testing.T) {
}
func TestAddCtrDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -377,22 +381,22 @@ func TestAddCtrDifferentNamespaceFails(t *testing.T) {
}
func TestGetNonexistentContainerFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Container("does not exist")
assert.Error(t, err)
})
}
func TestGetContainerWithEmptyIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Container("")
assert.Error(t, err)
})
}
func TestGetContainerInDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test2"
@@ -408,8 +412,8 @@ func TestGetContainerInDifferentNamespaceFails(t *testing.T) {
}
func TestGetContainerInSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -427,8 +431,8 @@ func TestGetContainerInSameNamespaceSucceeds(t *testing.T) {
}
func TestGetContainerInNamespaceWhileNotInNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -444,22 +448,22 @@ func TestGetContainerInNamespaceWhileNotInNamespaceSucceeds(t *testing.T) {
}
func TestLookupContainerWithEmptyIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.LookupContainer("")
assert.Error(t, err)
})
}
func TestLookupNonexistentContainerFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.LookupContainer("does not exist")
assert.Error(t, err)
})
}
func TestLookupContainerByFullID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -473,8 +477,8 @@ func TestLookupContainerByFullID(t *testing.T) {
}
func TestLookupContainerByUniquePartialID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -488,10 +492,10 @@ func TestLookupContainerByUniquePartialID(t *testing.T) {
}
func TestLookupContainerByNonUniquePartialIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", manager)
assert.NoError(t, err)
- testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", lockPath)
+ testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr1)
@@ -506,8 +510,8 @@ func TestLookupContainerByNonUniquePartialIDFails(t *testing.T) {
}
func TestLookupContainerByName(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -521,8 +525,8 @@ func TestLookupContainerByName(t *testing.T) {
}
func TestLookupCtrByPodNameFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -534,8 +538,8 @@ func TestLookupCtrByPodNameFails(t *testing.T) {
}
func TestLookupCtrByPodIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -547,8 +551,8 @@ func TestLookupCtrByPodIDFails(t *testing.T) {
}
func TestLookupCtrInSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -566,8 +570,8 @@ func TestLookupCtrInSameNamespaceSucceeds(t *testing.T) {
}
func TestLookupCtrInDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -583,11 +587,11 @@ func TestLookupCtrInDifferentNamespaceFails(t *testing.T) {
}
func TestLookupContainerMatchInDifferentNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestContainer(strings.Repeat("0", 32), "test1", manager)
assert.NoError(t, err)
testCtr1.config.Namespace = "test2"
- testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", lockPath)
+ testCtr2, err := getTestContainer(strings.Repeat("0", 31)+"1", "test2", manager)
assert.NoError(t, err)
testCtr2.config.Namespace = "test1"
@@ -607,14 +611,14 @@ func TestLookupContainerMatchInDifferentNamespaceSucceeds(t *testing.T) {
}
func TestHasContainerEmptyIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.HasContainer("")
assert.Error(t, err)
})
}
func TestHasContainerNoSuchContainerReturnsFalse(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
exists, err := state.HasContainer("does not exist")
assert.NoError(t, err)
assert.False(t, exists)
@@ -622,8 +626,8 @@ func TestHasContainerNoSuchContainerReturnsFalse(t *testing.T) {
}
func TestHasContainerFindsContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -636,8 +640,8 @@ func TestHasContainerFindsContainer(t *testing.T) {
}
func TestHasContainerPodIDIsFalse(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -650,8 +654,8 @@ func TestHasContainerPodIDIsFalse(t *testing.T) {
}
func TestHasContainerSameNamespaceIsTrue(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -668,8 +672,8 @@ func TestHasContainerSameNamespaceIsTrue(t *testing.T) {
}
func TestHasContainerDifferentNamespaceIsFalse(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -686,8 +690,8 @@ func TestHasContainerDifferentNamespaceIsFalse(t *testing.T) {
}
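These namespace tests all pin the same visibility rule: once the state is placed in a namespace, only objects whose config.Namespace matches are reachable (an unscoped state, by all appearances in these tests, sees everything). A minimal model of that rule:

package main

import "fmt"

type ctr struct{ id, namespace string }

// visible models the check: an unscoped state ("") sees every container,
// a scoped state only sees containers in its own namespace.
func visible(stateNS string, c ctr) bool {
	return stateNS == "" || c.namespace == stateNS
}

func main() {
	c := ctr{id: "0000", namespace: "test1"}
	fmt.Println(visible("test1", c)) // true: same namespace
	fmt.Println(visible("test2", c)) // false: different namespace
	fmt.Println(visible("", c))      // true: unscoped state
}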
func TestSaveAndUpdateContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -711,8 +715,8 @@ func TestSaveAndUpdateContainer(t *testing.T) {
}
func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -740,8 +744,8 @@ func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) {
}
func TestUpdateContainerNotInDatabaseReturnsError(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.UpdateContainer(testCtr)
@@ -751,15 +755,15 @@ func TestUpdateContainerNotInDatabaseReturnsError(t *testing.T) {
}
func TestUpdateInvalidContainerReturnsError(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.UpdateContainer(&Container{config: &Config{ID: "1234"}})
assert.Error(t, err)
})
}
func TestUpdateContainerNotInNamespaceReturnsError(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -775,15 +779,15 @@ func TestUpdateContainerNotInNamespaceReturnsError(t *testing.T) {
}
func TestSaveInvalidContainerReturnsError(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.SaveContainer(&Container{config: &Config{ID: "1234"}})
assert.Error(t, err)
})
}
func TestSaveContainerNotInStateReturnsError(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.SaveContainer(testCtr)
@@ -793,8 +797,8 @@ func TestSaveContainerNotInStateReturnsError(t *testing.T) {
}
func TestSaveContainerNotInNamespaceReturnsError(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -810,8 +814,8 @@ func TestSaveContainerNotInNamespaceReturnsError(t *testing.T) {
}
func TestRemoveContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -831,8 +835,8 @@ func TestRemoveContainer(t *testing.T) {
}
func TestRemoveNonexistantContainerFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.RemoveContainer(testCtr)
@@ -842,8 +846,8 @@ func TestRemoveNonexistantContainerFails(t *testing.T) {
}
func TestRemoveContainerNotInNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -869,7 +873,7 @@ func TestRemoveContainerNotInNamespaceFails(t *testing.T) {
}
func TestGetAllContainersOnNewStateIsEmpty(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
ctrs, err := state.AllContainers()
assert.NoError(t, err)
assert.Equal(t, 0, len(ctrs))
@@ -877,8 +881,8 @@ func TestGetAllContainersOnNewStateIsEmpty(t *testing.T) {
}
func TestGetAllContainersWithOneContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -893,10 +897,10 @@ func TestGetAllContainersWithOneContainer(t *testing.T) {
}
func TestGetAllContainersTwoContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr1)
@@ -912,8 +916,8 @@ func TestGetAllContainersTwoContainers(t *testing.T) {
}
func TestGetAllContainersNoContainerInNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -930,13 +934,13 @@ func TestGetAllContainersNoContainerInNamespace(t *testing.T) {
}
func TestGetContainerOneContainerInNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr1.config.Namespace = "test1"
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr1)
@@ -956,15 +960,15 @@ func TestGetContainerOneContainerInNamespace(t *testing.T) {
}
func TestContainerInUseInvalidContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.ContainerInUse(&Container{})
assert.Error(t, err)
})
}
func TestContainerInUseCtrNotInState(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
_, err = state.ContainerInUse(testCtr)
assert.Error(t, err)
@@ -972,8 +976,8 @@ func TestContainerInUseCtrNotInState(t *testing.T) {
}
func TestContainerInUseCtrNotInNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -989,10 +993,10 @@ func TestContainerInUseCtrNotInNamespace(t *testing.T) {
}
func TestContainerInUseOneContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1011,12 +1015,12 @@ func TestContainerInUseOneContainer(t *testing.T) {
}
func TestContainerInUseTwoContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- testCtr3, err := getTestCtrN("3", lockPath)
+ testCtr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1038,10 +1042,10 @@ func TestContainerInUseTwoContainers(t *testing.T) {
}
func TestContainerInUseOneContainerMultipleDependencies(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1061,10 +1065,10 @@ func TestContainerInUseOneContainerMultipleDependencies(t *testing.T) {
}
func TestContainerInUseGenericDependency(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.Dependencies = []string{testCtr1.config.ID}
@@ -1083,12 +1087,12 @@ func TestContainerInUseGenericDependency(t *testing.T) {
}
func TestContainerInUseMultipleGenericDependencies(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
- testCtr3, err := getTestCtrN("3", lockPath)
+ testCtr3, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr3.config.Dependencies = []string{testCtr1.config.ID, testCtr2.config.ID}
@@ -1115,10 +1119,10 @@ func TestContainerInUseMultipleGenericDependencies(t *testing.T) {
}
func TestContainerInUseGenericAndNamespaceDependencies(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.Dependencies = []string{testCtr1.config.ID}
@@ -1138,10 +1142,10 @@ func TestContainerInUseGenericAndNamespaceDependencies(t *testing.T) {
}
func TestCannotRemoveContainerWithDependency(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.config.ID
@@ -1162,10 +1166,10 @@ func TestCannotRemoveContainerWithDependency(t *testing.T) {
}
func TestCannotRemoveContainerWithGenericDependency(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.Dependencies = []string{testCtr1.config.ID}
@@ -1186,10 +1190,10 @@ func TestCannotRemoveContainerWithGenericDependency(t *testing.T) {
}
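The in-use and removal tests form a pair: ContainerInUse reports who depends on a container, and RemoveContainer must refuse while that set is non-empty. A runnable miniature of the invariant:

package main

import "fmt"

// dependents maps a container ID to the IDs depending on it (a shared
// user namespace, a generic dependency, and so on).
type state struct{ dependents map[string][]string }

func (s *state) removeContainer(id string) error {
	if users := s.dependents[id]; len(users) > 0 {
		return fmt.Errorf("container %s is in use by %v", id, users)
	}
	delete(s.dependents, id)
	return nil
}

func main() {
	s := &state{dependents: map[string][]string{
		"ctr1": {"ctr2"}, // ctr2 has UserNsCtr = ctr1
	}}
	fmt.Println(s.removeContainer("ctr1")) // error: in use by [ctr2]
	s.dependents["ctr1"] = nil             // dependent removed first
	fmt.Println(s.removeContainer("ctr1")) // <nil>: removable now
}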
func TestCanRemoveContainerAfterDependencyRemoved(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.ID()
@@ -1213,10 +1217,10 @@ func TestCanRemoveContainerAfterDependencyRemoved(t *testing.T) {
}
func TestCanRemoveContainerAfterDependencyRemovedDuplicate(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr1, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr1, err := getTestCtr1(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtr2(lockPath)
+ testCtr2, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr2.config.UserNsCtr = testCtr1.ID()
@@ -1241,11 +1245,11 @@ func TestCanRemoveContainerAfterDependencyRemovedDuplicate(t *testing.T) {
}
func TestCannotUsePodAsDependency(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
- testPod, err := getTestPod2(lockPath)
+ testPod, err := getTestPod2(manager)
assert.NoError(t, err)
testCtr.config.UserNsCtr = testPod.ID()
@@ -1263,8 +1267,8 @@ func TestCannotUsePodAsDependency(t *testing.T) {
}
func TestCannotUseBadIDAsDependency(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.UserNsCtr = strings.Repeat("5", 32)
@@ -1279,8 +1283,8 @@ func TestCannotUseBadIDAsDependency(t *testing.T) {
}
func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
testCtr.config.Dependencies = []string{strings.Repeat("5", 32)}
@@ -1295,22 +1299,22 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
}
func TestGetPodDoesNotExist(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("doesnotexist")
assert.Error(t, err)
})
}
func TestGetPodEmptyID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.Pod("")
assert.Error(t, err)
})
}
func TestGetPodOnePod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1324,11 +1328,11 @@ func TestGetPodOnePod(t *testing.T) {
}
func TestGetOnePodFromTwo(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod2(lockPath)
+ testPod2, err := getTestPod2(manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1345,11 +1349,11 @@ func TestGetOnePodFromTwo(t *testing.T) {
}
func TestGetNotExistPodWithPods(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod2(lockPath)
+ testPod2, err := getTestPod2(manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1364,8 +1368,8 @@ func TestGetNotExistPodWithPods(t *testing.T) {
}
func TestGetPodByCtrID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -1377,8 +1381,8 @@ func TestGetPodByCtrID(t *testing.T) {
}
func TestGetPodInNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1396,8 +1400,8 @@ func TestGetPodInNamespaceSucceeds(t *testing.T) {
}
func TestGetPodPodNotInNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1413,22 +1417,22 @@ func TestGetPodPodNotInNamespaceFails(t *testing.T) {
}
func TestLookupPodEmptyID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.LookupPod("")
assert.Error(t, err)
})
}
func TestLookupNotExistPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.LookupPod("doesnotexist")
assert.Error(t, err)
})
}
func TestLookupPodFullID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1442,8 +1446,8 @@ func TestLookupPodFullID(t *testing.T) {
}
func TestLookupPodUniquePartialID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1457,11 +1461,11 @@ func TestLookupPodUniquePartialID(t *testing.T) {
}
func TestLookupPodNonUniquePartialID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", manager)
assert.NoError(t, err)
- testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", lockPath)
+ testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1476,8 +1480,8 @@ func TestLookupPodNonUniquePartialID(t *testing.T) {
}
func TestLookupPodByName(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1491,8 +1495,8 @@ func TestLookupPodByName(t *testing.T) {
}
func TestLookupPodByCtrID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -1504,8 +1508,8 @@ func TestLookupPodByCtrID(t *testing.T) {
}
func TestLookupPodByCtrName(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -1517,8 +1521,8 @@ func TestLookupPodByCtrName(t *testing.T) {
}
func TestLookupPodInSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1536,8 +1540,8 @@ func TestLookupPodInSameNamespaceSucceeds(t *testing.T) {
}
func TestLookupPodInDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1553,13 +1557,13 @@ func TestLookupPodInDifferentNamespaceFails(t *testing.T) {
}
func TestLookupPodOneInDifferentNamespaceFindsRightPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod(strings.Repeat("1", 32), "test1", manager)
assert.NoError(t, err)
testPod1.config.Namespace = "test1"
- testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", lockPath)
+ testPod2, err := getTestPod(strings.Repeat("1", 31)+"2", "test2", manager)
assert.NoError(t, err)
testPod2.config.Namespace = "test2"
@@ -1580,14 +1584,14 @@ func TestLookupPodOneInDifferentNamespaceFindsRightPod(t *testing.T) {
}
func TestHasPodEmptyIDErrors(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.HasPod("")
assert.Error(t, err)
})
}
func TestHasPodNoSuchPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
exist, err := state.HasPod("notexist")
assert.NoError(t, err)
assert.False(t, exist)
@@ -1595,8 +1599,8 @@ func TestHasPodNoSuchPod(t *testing.T) {
}
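The Has* tests encode a small but deliberate contract: an empty ID is an error, while a well-formed ID with no match (including an ID that belongs to the other object kind) is simply (false, nil). A toy version:

package main

import (
	"errors"
	"fmt"
)

func hasPod(pods map[string]bool, id string) (bool, error) {
	if id == "" {
		return false, errors.New("pod ID must not be empty") // malformed query
	}
	return pods[id], nil // no match is not an error, just false
}

func main() {
	pods := map[string]bool{"1111": true}
	fmt.Println(hasPod(pods, ""))         // false, error
	fmt.Println(hasPod(pods, "notexist")) // false, <nil>
	fmt.Println(hasPod(pods, "1111"))     // true, <nil>
}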
func TestHasPodWrongIDFalse(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1609,8 +1613,8 @@ func TestHasPodWrongIDFalse(t *testing.T) {
}
func TestHasPodRightIDTrue(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1623,8 +1627,8 @@ func TestHasPodRightIDTrue(t *testing.T) {
}
func TestHasPodCtrIDFalse(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -1637,8 +1641,8 @@ func TestHasPodCtrIDFalse(t *testing.T) {
}
func TestHasPodSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1655,8 +1659,8 @@ func TestHasPodSameNamespaceSucceeds(t *testing.T) {
}
func TestHasPodDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1673,15 +1677,15 @@ func TestHasPodDifferentNamespaceFails(t *testing.T) {
}
func TestAddPodInvalidPodErrors(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.AddPod(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestAddPodValidPodSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1696,11 +1700,11 @@ func TestAddPodValidPodSucceeds(t *testing.T) {
}
func TestAddPodDuplicateIDFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod(testPod1.ID(), "testpod2", lockPath)
+ testPod2, err := getTestPod(testPod1.ID(), "testpod2", manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1716,11 +1720,11 @@ func TestAddPodDuplicateIDFails(t *testing.T) {
}
func TestAddPodDuplicateNameFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod(strings.Repeat("2", 32), testPod1.Name(), lockPath)
+ testPod2, err := getTestPod(strings.Repeat("2", 32), testPod1.Name(), manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1736,11 +1740,11 @@ func TestAddPodDuplicateNameFails(t *testing.T) {
}
func TestAddPodNonDuplicateSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod2(lockPath)
+ testPod2, err := getTestPod2(manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1756,11 +1760,11 @@ func TestAddPodNonDuplicateSucceeds(t *testing.T) {
}
func TestAddPodCtrIDConflictFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
- testPod, err := getTestPod(testCtr.ID(), "testpod1", lockPath)
+ testPod, err := getTestPod(testCtr.ID(), "testpod1", manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -1776,11 +1780,11 @@ func TestAddPodCtrIDConflictFails(t *testing.T) {
}
func TestAddPodCtrNameConflictFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
- testPod, err := getTestPod(strings.Repeat("3", 32), testCtr.Name(), lockPath)
+ testPod, err := getTestPod(strings.Repeat("3", 32), testCtr.Name(), manager)
assert.NoError(t, err)
err = state.AddContainer(testCtr)
@@ -1796,8 +1800,8 @@ func TestAddPodCtrNameConflictFails(t *testing.T) {
}
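The four conflict tests above (duplicate pod ID and name, then container ID and name) show that pods and containers share one pool of IDs and names. A self-contained model of that shared registry:

package main

import "fmt"

type registry struct {
	ids   map[string]string // ID -> "pod" or "ctr"
	names map[string]string
}

func (r *registry) add(kind, id, name string) error {
	if owner, ok := r.ids[id]; ok {
		return fmt.Errorf("ID %s is in use by a %s", id, owner)
	}
	if owner, ok := r.names[name]; ok {
		return fmt.Errorf("name %s is in use by a %s", name, owner)
	}
	r.ids[id], r.names[name] = kind, kind
	return nil
}

func main() {
	r := &registry{ids: map[string]string{}, names: map[string]string{}}
	fmt.Println(r.add("ctr", "1111", "test1")) // <nil>
	fmt.Println(r.add("pod", "1111", "pod1"))  // ID conflict with the ctr
	fmt.Println(r.add("pod", "2222", "test1")) // name conflict with the ctr
}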
func TestAddPodSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1816,8 +1820,8 @@ func TestAddPodSameNamespaceSucceeds(t *testing.T) {
}
func TestAddPodDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1836,15 +1840,15 @@ func TestAddPodDifferentNamespaceFails(t *testing.T) {
}
func TestRemovePodInvalidPodErrors(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.RemovePod(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestRemovePodNotInStateFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.RemovePod(testPod)
@@ -1854,8 +1858,8 @@ func TestRemovePodNotInStateFails(t *testing.T) {
}
func TestRemovePodSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1871,11 +1875,11 @@ func TestRemovePodSucceeds(t *testing.T) {
}
func TestRemovePodFromPods(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod2(lockPath)
+ testPod2, err := getTestPod2(manager)
assert.NoError(t, err)
err = state.AddPod(testPod1)
@@ -1896,11 +1900,11 @@ func TestRemovePodFromPods(t *testing.T) {
}
func TestRemovePodNotEmptyFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -1920,11 +1924,11 @@ func TestRemovePodNotEmptyFails(t *testing.T) {
}
func TestRemovePodAfterEmptySucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -1947,8 +1951,8 @@ func TestRemovePodAfterEmptySucceeds(t *testing.T) {
}
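TestRemovePodNotEmptyFails and TestRemovePodAfterEmptySucceeds bracket one rule: a pod can only be removed once it holds no containers. In miniature:

package main

import "fmt"

type pod struct {
	id   string
	ctrs map[string]bool
}

func removePod(p *pod) error {
	if n := len(p.ctrs); n > 0 {
		return fmt.Errorf("pod %s still has %d container(s)", p.id, n)
	}
	return nil // safe to drop the pod record
}

func main() {
	p := &pod{id: "1111", ctrs: map[string]bool{"2222": true}}
	fmt.Println(removePod(p)) // error: not empty
	delete(p.ctrs, "2222")    // the RemovePodContainers step
	fmt.Println(removePod(p)) // <nil>
}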
func TestRemovePodNotInNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -1970,7 +1974,7 @@ func TestRemovePodNotInNamespaceFails(t *testing.T) {
}
func TestAllPodsEmptyOnEmptyState(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
allPods, err := state.AllPods()
assert.NoError(t, err)
assert.Equal(t, 0, len(allPods))
@@ -1978,8 +1982,8 @@ func TestAllPodsEmptyOnEmptyState(t *testing.T) {
}
func TestAllPodsFindsPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -1994,14 +1998,14 @@ func TestAllPodsFindsPod(t *testing.T) {
}
func TestAllPodsMultiplePods(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
- testPod2, err := getTestPod2(lockPath)
+ testPod2, err := getTestPod2(manager)
assert.NoError(t, err)
- testPod3, err := getTestPodN("3", lockPath)
+ testPod3, err := getTestPodN("3", manager)
assert.NoError(t, err)
allPods1, err := state.AllPods()
@@ -2032,8 +2036,8 @@ func TestAllPodsMultiplePods(t *testing.T) {
}
func TestAllPodsPodInDifferentNamespaces(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -2050,13 +2054,13 @@ func TestAllPodsPodInDifferentNamespaces(t *testing.T) {
}
func TestAllPodsOnePodInDifferentNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod1, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod1, err := getTestPod1(manager)
assert.NoError(t, err)
testPod1.config.Namespace = "test1"
- testPod2, err := getTestPod2(lockPath)
+ testPod2, err := getTestPod2(manager)
assert.NoError(t, err)
testPod2.config.Namespace = "test2"
@@ -2078,15 +2082,15 @@ func TestAllPodsOnePodInDifferentNamespace(t *testing.T) {
}
func TestPodHasContainerNoSuchPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.PodHasContainer(&Pod{config: &PodConfig{}}, strings.Repeat("0", 32))
assert.Error(t, err)
})
}
func TestPodHasContainerEmptyCtrID(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2098,8 +2102,8 @@ func TestPodHasContainerEmptyCtrID(t *testing.T) {
}
func TestPodHasContainerNoSuchCtr(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2112,11 +2116,11 @@ func TestPodHasContainerNoSuchCtr(t *testing.T) {
}
func TestPodHasContainerCtrNotInPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2132,11 +2136,11 @@ func TestPodHasContainerCtrNotInPod(t *testing.T) {
}
func TestPodHasContainerSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2154,8 +2158,8 @@ func TestPodHasContainerSucceeds(t *testing.T) {
}
func TestPodHasContainerPodNotInNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -2171,15 +2175,15 @@ func TestPodHasContainerPodNotInNamespaceFails(t *testing.T) {
}
func TestPodContainersByIDInvalidPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.PodContainersByID(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestPodContainerdByIDPodNotInState(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
_, err = state.PodContainersByID(testPod)
@@ -2189,8 +2193,8 @@ func TestPodContainerdByIDPodNotInState(t *testing.T) {
}
func TestPodContainersByIDEmptyPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2203,11 +2207,11 @@ func TestPodContainersByIDEmptyPod(t *testing.T) {
}
func TestPodContainersByIDOneContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2226,19 +2230,19 @@ func TestPodContainersByIDOneContainer(t *testing.T) {
}
func TestPodContainersByIDMultipleContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
- testCtr3, err := getTestCtrN("4", lockPath)
+ testCtr3, err := getTestCtrN("4", manager)
assert.NoError(t, err)
testCtr3.config.Pod = testPod.ID()
@@ -2273,8 +2277,8 @@ func TestPodContainersByIDMultipleContainers(t *testing.T) {
}
func TestPodContainerByIDPodNotInNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -2290,15 +2294,15 @@ func TestPodContainerByIDPodNotInNamespace(t *testing.T) {
}
func TestPodContainersInvalidPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
_, err := state.PodContainers(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestPodContainersPodNotInState(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
_, err = state.PodContainers(testPod)
@@ -2308,8 +2312,8 @@ func TestPodContainersPodNotInState(t *testing.T) {
}
func TestPodContainersEmptyPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2322,11 +2326,11 @@ func TestPodContainersEmptyPod(t *testing.T) {
}
func TestPodContainersOneContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2346,19 +2350,19 @@ func TestPodContainersOneContainer(t *testing.T) {
}
func TestPodContainersMultipleContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
- testCtr3, err := getTestCtrN("4", lockPath)
+ testCtr3, err := getTestCtrN("4", manager)
assert.NoError(t, err)
testCtr3.config.Pod = testPod.ID()
@@ -2393,8 +2397,8 @@ func TestPodContainersMultipleContainers(t *testing.T) {
}
func TestPodContainersPodNotInNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -2410,15 +2414,15 @@ func TestPodContainersPodNotInNamespace(t *testing.T) {
}
func TestRemovePodContainersInvalidPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.RemovePodContainers(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestRemovePodContainersPodNotInState(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.RemovePodContainers(testPod)
@@ -2428,8 +2432,8 @@ func TestRemovePodContainersPodNotInState(t *testing.T) {
}
func TestRemovePodContainersNoContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2445,11 +2449,11 @@ func TestRemovePodContainersNoContainers(t *testing.T) {
}
func TestRemovePodContainersOneContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2469,15 +2473,15 @@ func TestRemovePodContainersOneContainer(t *testing.T) {
}
func TestRemovePodContainersPreservesCtrOutsidePod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2503,15 +2507,15 @@ func TestRemovePodContainersPreservesCtrOutsidePod(t *testing.T) {
}
func TestRemovePodContainersTwoContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
@@ -2534,15 +2538,15 @@ func TestRemovePodContainersTwoContainers(t *testing.T) {
}
func TestRemovePodContainerDependencyInPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2566,8 +2570,8 @@ func TestRemovePodContainerDependencyInPod(t *testing.T) {
}
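The hunk above wires testCtr2's IPC namespace to testCtr1 with both inside the pod; the assertions are elided by the diff, but the setup only makes sense if RemovePodContainers may drop dependent and dependency together, where removing testCtr1 alone would trip the in-use check. A toy bulk remove under that assumption:

package main

import "fmt"

// removeAll models RemovePodContainers: because every dependency edge
// stays inside the set being removed, no in-use check can fire, and the
// whole set can be cleared in one transaction.
func removeAll(ctrs map[string][]string) {
	for id := range ctrs {
		delete(ctrs, id) // deleting while ranging is safe in Go
	}
}

func main() {
	ctrs := map[string][]string{
		"ctr1": {},       // no dependencies
		"ctr2": {"ctr1"}, // IPCNsCtr = ctr1
	}
	removeAll(ctrs)
	fmt.Println(len(ctrs)) // 0
}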
func TestRemoveContainersNotInNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -2583,8 +2587,8 @@ func TestRemoveContainersNotInNamespace(t *testing.T) {
}
func TestAddContainerToPodInvalidPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.AddContainerToPod(&Pod{config: &PodConfig{}}, testCtr)
@@ -2593,8 +2597,8 @@ func TestAddContainerToPodInvalidPod(t *testing.T) {
}
func TestAddContainerToPodInvalidCtr(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2610,11 +2614,11 @@ func TestAddContainerToPodInvalidCtr(t *testing.T) {
}
func TestAddContainerToPodPodNotInState(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2625,11 +2629,11 @@ func TestAddContainerToPodPodNotInState(t *testing.T) {
}
func TestAddContainerToPodSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2653,15 +2657,15 @@ func TestAddContainerToPodSucceeds(t *testing.T) {
}
func TestAddContainerToPodTwoContainers(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
@@ -2685,15 +2689,15 @@ func TestAddContainerToPodTwoContainers(t *testing.T) {
}
func TestAddContainerToPodWithAddContainer(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -2718,14 +2722,14 @@ func TestAddContainerToPodWithAddContainer(t *testing.T) {
}
func TestAddContainerToPodCtrIDConflict(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
- testCtr2, err := getTestContainer(testCtr1.ID(), "testCtr3", lockPath)
+ testCtr2, err := getTestContainer(testCtr1.ID(), "testCtr3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
@@ -2749,14 +2753,14 @@ func TestAddContainerToPodCtrIDConflict(t *testing.T) {
}
func TestAddContainerToPodCtrNameConflict(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
- testCtr2, err := getTestContainer(strings.Repeat("4", 32), testCtr1.Name(), lockPath)
+ testCtr2, err := getTestContainer(strings.Repeat("4", 32), testCtr1.Name(), manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
@@ -2780,11 +2784,11 @@ func TestAddContainerToPodCtrNameConflict(t *testing.T) {
}
func TestAddContainerToPodPodIDConflict(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestContainer(testPod.ID(), "testCtr", lockPath)
+ testCtr, err := getTestContainer(testPod.ID(), "testCtr", manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2805,11 +2809,11 @@ func TestAddContainerToPodPodIDConflict(t *testing.T) {
}
func TestAddContainerToPodPodNameConflict(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), lockPath)
+ testCtr, err := getTestContainer(strings.Repeat("2", 32), testPod.Name(), manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -2830,15 +2834,15 @@ func TestAddContainerToPodPodNameConflict(t *testing.T) {
}
func TestAddContainerToPodAddsDependencies(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2860,11 +2864,11 @@ func TestAddContainerToPodAddsDependencies(t *testing.T) {
}
func TestAddContainerToPodPodDependencyFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
testCtr.config.IPCNsCtr = testPod.ID()
@@ -2882,11 +2886,11 @@ func TestAddContainerToPodPodDependencyFails(t *testing.T) {
}
func TestAddContainerToPodBadDependencyFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
testCtr.config.IPCNsCtr = strings.Repeat("8", 32)
@@ -2904,14 +2908,14 @@ func TestAddContainerToPodBadDependencyFails(t *testing.T) {
}
func TestAddContainerToPodDependencyOutsidePodFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2940,17 +2944,17 @@ func TestAddContainerToPodDependencyOutsidePodFails(t *testing.T) {
}
func TestAddContainerToPodDependencyInSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
testCtr1.config.Namespace = "test1"
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -2973,17 +2977,17 @@ func TestAddContainerToPodDependencyInSameNamespaceSucceeds(t *testing.T) {
}
func TestAddContainerToPodDependencyInSeparateNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
testCtr1.config.Namespace = "test1"
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -3013,12 +3017,12 @@ func TestAddContainerToPodDependencyInSeparateNamespaceFails(t *testing.T) {
}
func TestAddContainerToPodSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
testCtr.config.Pod = testPod.ID()
@@ -3037,12 +3041,12 @@ func TestAddContainerToPodSameNamespaceSucceeds(t *testing.T) {
}
func TestAddContainerToPodDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test2"
testCtr.config.Pod = testPod.ID()
@@ -3060,11 +3064,11 @@ func TestAddContainerToPodDifferentNamespaceFails(t *testing.T) {
}
func TestAddContainerToPodNamespaceOnCtrFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
testCtr.config.Pod = testPod.ID()
@@ -3082,12 +3086,12 @@ func TestAddContainerToPodNamespaceOnCtrFails(t *testing.T) {
}
func TestAddContainerToPodNamespaceOnPodFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -3104,11 +3108,11 @@ func TestAddContainerToPodNamespaceOnPodFails(t *testing.T) {
}
func TestAddCtrToPodSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
- testPod, err := getTestPod2(lockPath)
+ testPod, err := getTestPod2(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -3131,11 +3135,11 @@ func TestAddCtrToPodSameNamespaceSucceeds(t *testing.T) {
}
func TestAddCtrToPodDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
- testPod, err := getTestPod2(lockPath)
+ testPod, err := getTestPod2(manager)
assert.NoError(t, err)
testCtr.config.Namespace = "test1"
@@ -3159,8 +3163,8 @@ func TestAddCtrToPodDifferentNamespaceFails(t *testing.T) {
}
func TestRemoveContainerFromPodBadPodFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testCtr, err := getTestCtr1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testCtr, err := getTestCtr1(manager)
assert.NoError(t, err)
err = state.RemoveContainerFromPod(&Pod{config: &PodConfig{}}, testCtr)
@@ -3169,11 +3173,11 @@ func TestRemoveContainerFromPodBadPodFails(t *testing.T) {
}
func TestRemoveContainerFromPodPodNotInStateFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -3185,11 +3189,11 @@ func TestRemoveContainerFromPodPodNotInStateFails(t *testing.T) {
}
func TestRemoveContainerFromPodCtrNotInStateFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -3204,11 +3208,11 @@ func TestRemoveContainerFromPodCtrNotInStateFails(t *testing.T) {
}
func TestRemoveContainerFromPodCtrNotInPodFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -3229,11 +3233,11 @@ func TestRemoveContainerFromPodCtrNotInPodFails(t *testing.T) {
}
func TestRemoveContainerFromPodSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -3257,15 +3261,15 @@ func TestRemoveContainerFromPodSucceeds(t *testing.T) {
}
func TestRemoveContainerFromPodWithDependencyFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -3293,15 +3297,15 @@ func TestRemoveContainerFromPodWithDependencyFails(t *testing.T) {
}
func TestRemoveContainerFromPodWithDependencySucceedsAfterDepRemoved(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
- testCtr1, err := getTestCtr2(lockPath)
+ testCtr1, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr1.config.Pod = testPod.ID()
- testCtr2, err := getTestCtrN("3", lockPath)
+ testCtr2, err := getTestCtrN("3", manager)
assert.NoError(t, err)
testCtr2.config.Pod = testPod.ID()
testCtr2.config.IPCNsCtr = testCtr1.ID()
@@ -3332,13 +3336,13 @@ func TestRemoveContainerFromPodWithDependencySucceedsAfterDepRemoved(t *testing.
}
func TestRemoveContainerFromPodSameNamespaceSucceeds(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -3366,13 +3370,13 @@ func TestRemoveContainerFromPodSameNamespaceSucceeds(t *testing.T) {
}
func TestRemoveContainerFromPodDifferentNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
- testCtr, err := getTestCtr2(lockPath)
+ testCtr, err := getTestCtr2(manager)
assert.NoError(t, err)
testCtr.config.Pod = testPod.ID()
@@ -3402,15 +3406,15 @@ func TestRemoveContainerFromPodDifferentNamespaceFails(t *testing.T) {
}
func TestUpdatePodInvalidPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.UpdatePod(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestUpdatePodPodNotInStateFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.UpdatePod(testPod)
@@ -3419,8 +3423,8 @@ func TestUpdatePodPodNotInStateFails(t *testing.T) {
}
func TestUpdatePodNotInNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -3436,15 +3440,15 @@ func TestUpdatePodNotInNamespaceFails(t *testing.T) {
}
func TestSavePodInvalidPod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
err := state.SavePod(&Pod{config: &PodConfig{}})
assert.Error(t, err)
})
}
func TestSavePodPodNotInStateFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.SavePod(testPod)
@@ -3453,8 +3457,8 @@ func TestSavePodPodNotInStateFails(t *testing.T) {
}
func TestSavePodNotInNamespaceFails(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
@@ -3470,8 +3474,8 @@ func TestSavePodNotInNamespaceFails(t *testing.T) {
}
func TestSaveAndUpdatePod(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
err = state.AddPod(testPod)
@@ -3495,8 +3499,8 @@ func TestSaveAndUpdatePod(t *testing.T) {
}
func TestSaveAndUpdatePodSameNamespace(t *testing.T) {
- runForAllStates(t, func(t *testing.T, state State, lockPath string) {
- testPod, err := getTestPod1(lockPath)
+ runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+ testPod, err := getTestPod1(manager)
assert.NoError(t, err)
testPod.config.Namespace = "test1"
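
The state tests above now thread a lock.Manager through every helper instead of a raw lock path. A minimal sketch of how a test might obtain locks from the in-memory manager, assuming the NewInMemoryManager constructor and the Manager/Locker interfaces match what the new libpod/lock files in this change suggest; this is not the actual test harness:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

func main() {
	// Back the whole test run with a fixed pool of in-memory locks.
	manager, err := lock.NewInMemoryManager(32)
	if err != nil {
		panic(err)
	}

	// Each getTestCtrN/getTestPodN-style helper would allocate one lock
	// per object and record its ID in the object's config.
	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated lock with ID", l.ID())

	l.Lock()
	defer l.Unlock()
}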
diff --git a/libpod/volume.go b/libpod/volume.go
index b732e8aa7..026a3bf49 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,6 +1,6 @@
package libpod
-import "github.com/containers/storage"
+import "github.com/containers/libpod/libpod/lock"
// Volume is the type used to create named volumes
// TODO: all volumes should be created using this and the Volume API
@@ -9,13 +9,17 @@ type Volume struct {
valid bool
runtime *Runtime
- lock storage.Locker
+ lock lock.Locker
}
// VolumeConfig holds the volume's config information
//easyjson:json
type VolumeConfig struct {
- Name string `json:"name"`
+ // Name of the volume
+ Name string `json:"name"`
+ // ID of this volume's lock
+ LockID uint32 `json:"lockID"`
+
Labels map[string]string `json:"labels"`
MountPoint string `json:"mountPoint"`
Driver string `json:"driver"`
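
The Volume type now persists a LockID alongside its name, so a lock can be re-attached after a restart instead of being tied to a filesystem path. A rough sketch of that round trip; the volumeConfig type here is a hypothetical stand-in for the real VolumeConfig, and the Manager methods are assumed from the interfaces introduced in this change:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/lock"
)

// volumeConfig is a stand-in for the real VolumeConfig; only the two
// fields relevant to locking are reproduced here.
type volumeConfig struct {
	Name   string
	LockID uint32
}

func main() {
	manager, err := lock.NewInMemoryManager(32)
	if err != nil {
		panic(err)
	}

	// On volume creation: allocate a lock and persist its ID.
	l, err := manager.AllocateLock()
	if err != nil {
		panic(err)
	}
	cfg := volumeConfig{Name: "myvol", LockID: l.ID()}

	// On state refresh: the persisted LockID is all that is needed to
	// re-attach the same lock.
	restored, err := manager.RetrieveLock(cfg.LockID)
	if err != nil {
		panic(err)
	}
	fmt.Printf("volume %s uses lock %d\n", cfg.Name, restored.ID())
}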
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 25f8cd7a1..ffc98e307 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -2,6 +2,7 @@ package createconfig
import (
"encoding/json"
+ "fmt"
"net"
"os"
"strconv"
@@ -145,6 +146,36 @@ func (c *CreateConfig) CreateBlockIO() (*spec.LinuxBlockIO, error) {
return c.createBlockIO()
}
+// AddContainerInitBinary adds the init binary specified by path iff the
+// container will run in a private PID namespace that is not shared with the
+// host or another pre-existing container (where an init-like process is
+// already running).
+//
+// Note that AddContainerInitBinary prepends "/dev/init" and "--" to the
+// command so that the bind-mounted binary runs as PID 1.
+func (c *CreateConfig) AddContainerInitBinary(path string) error {
+ if path == "" {
+ return fmt.Errorf("please specify a path to the container-init binary")
+ }
+ if !c.PidMode.IsPrivate() {
+ return fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
+ }
+ if c.Systemd {
+ return fmt.Errorf("cannot use container-init binary with systemd")
+ }
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return errors.Wrap(err, "container-init binary not found on the host")
+ }
+ c.Command = append([]string{"/dev/init", "--"}, c.Command...)
+ c.Mounts = append(c.Mounts, spec.Mount{
+ Destination: "/dev/init",
+ Type: "bind",
+ Source: path,
+ Options: []string{"bind", "ro"},
+ })
+ return nil
+}
+
func processOptions(options []string) []string {
var (
foundrw, foundro bool
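
AddContainerInitBinary rewrites both the command and the mounts of a CreateConfig. A minimal usage sketch, assuming catatonit is installed at the path used by the new e2e test further down, and leaving every other CreateConfig field at its zero value (an empty PidMode counts as private):

package main

import (
	"fmt"

	createconfig "github.com/containers/libpod/pkg/spec"
)

func main() {
	// Zero values satisfy the preconditions: PidMode "" is private and
	// Systemd is false.
	cfg := &createconfig.CreateConfig{
		Command: []string{"sleep", "100"},
	}

	if err := cfg.AddContainerInitBinary("/usr/libexec/podman/catatonit"); err != nil {
		fmt.Println("init binary not added:", err)
		return
	}

	// The init binary now runs as PID 1 and execs the original command.
	fmt.Println(cfg.Command) // [/dev/init -- sleep 100]
}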
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index 3d6603364..ffa999730 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -6,8 +6,8 @@ import (
"strings"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage/pkg/mount"
"github.com/docker/docker/daemon/caps"
- "github.com/docker/docker/pkg/mount"
"github.com/docker/go-units"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/pkg/sysinfo/README.md
index c1530cef0..c1530cef0 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/README.md
+++ b/pkg/sysinfo/README.md
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/pkg/sysinfo/numcpu.go
index aeb1a3a80..aeb1a3a80 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go
+++ b/pkg/sysinfo/numcpu.go
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/pkg/sysinfo/numcpu_linux.go
index f1d2d9db3..f1d2d9db3 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go
+++ b/pkg/sysinfo/numcpu_linux.go
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/pkg/sysinfo/numcpu_windows.go
index 1d89dd550..1d89dd550 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go
+++ b/pkg/sysinfo/numcpu_windows.go
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go
index f046de4b1..f046de4b1 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go
+++ b/pkg/sysinfo/sysinfo.go
diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go
new file mode 100644
index 000000000..f4047b63c
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_linux.go
@@ -0,0 +1,254 @@
+package sysinfo
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func findCgroupMountpoints() (map[string]string, error) {
+ cgMounts, err := cgroups.GetCgroupMounts(false)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse cgroup information: %v", err)
+ }
+ mps := make(map[string]string)
+ for _, m := range cgMounts {
+ for _, ss := range m.Subsystems {
+ mps[ss] = m.Mountpoint
+ }
+ }
+ return mps, nil
+}
+
+// New returns a new SysInfo, using the filesystem to detect which features
+// the kernel supports. If `quiet` is `false` warnings are printed in logs
+// whenever an error occurs or misconfigurations are present.
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ cgMounts, err := findCgroupMountpoints()
+ if err != nil {
+ logrus.Warnf("Failed to parse cgroup information: %v", err)
+ } else {
+ sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet)
+ sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet)
+ sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet)
+ sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet)
+ sysInfo.cgroupPids = checkCgroupPids(quiet)
+ }
+
+ _, ok := cgMounts["devices"]
+ sysInfo.CgroupDevicesEnabled = ok
+
+ sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward")
+ sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables")
+ sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
+
+ // Check if AppArmor is supported.
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) {
+ sysInfo.AppArmor = true
+ }
+
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ sysInfo.Seccomp = true
+ }
+ }
+
+ return sysInfo
+}
+
+// checkCgroupMem reads the memory information from the memory cgroup mount point.
+func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo {
+ mountPoint, ok := cgMounts["memory"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Your kernel does not support cgroup memory limit")
+ }
+ return cgroupMemInfo{}
+ }
+
+ swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes")
+ if !quiet && !swapLimit {
+ logrus.Warn("Your kernel does not support swap memory limit")
+ }
+ memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes")
+ if !quiet && !memoryReservation {
+ logrus.Warn("Your kernel does not support memory reservation")
+ }
+ oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control")
+ if !quiet && !oomKillDisable {
+ logrus.Warn("Your kernel does not support oom control")
+ }
+ memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness")
+ if !quiet && !memorySwappiness {
+ logrus.Warn("Your kernel does not support memory swappiness")
+ }
+ kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes")
+ if !quiet && !kernelMemory {
+ logrus.Warn("Your kernel does not support kernel memory limit")
+ }
+
+ return cgroupMemInfo{
+ MemoryLimit: true,
+ SwapLimit: swapLimit,
+ MemoryReservation: memoryReservation,
+ OomKillDisable: oomKillDisable,
+ MemorySwappiness: memorySwappiness,
+ KernelMemory: kernelMemory,
+ }
+}
+
+// checkCgroupCPU reads the cpu information from the cpu cgroup mount point.
+func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo {
+ mountPoint, ok := cgMounts["cpu"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Unable to find cpu cgroup in mounts")
+ }
+ return cgroupCPUInfo{}
+ }
+
+ cpuShares := cgroupEnabled(mountPoint, "cpu.shares")
+ if !quiet && !cpuShares {
+ logrus.Warn("Your kernel does not support cgroup cpu shares")
+ }
+
+ cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us")
+ if !quiet && !cpuCfsPeriod {
+ logrus.Warn("Your kernel does not support cgroup cfs period")
+ }
+
+ cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us")
+ if !quiet && !cpuCfsQuota {
+ logrus.Warn("Your kernel does not support cgroup cfs quotas")
+ }
+
+ cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us")
+ if !quiet && !cpuRealtimePeriod {
+ logrus.Warn("Your kernel does not support cgroup rt period")
+ }
+
+ cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us")
+ if !quiet && !cpuRealtimeRuntime {
+ logrus.Warn("Your kernel does not support cgroup rt runtime")
+ }
+
+ return cgroupCPUInfo{
+ CPUShares: cpuShares,
+ CPUCfsPeriod: cpuCfsPeriod,
+ CPUCfsQuota: cpuCfsQuota,
+ CPURealtimePeriod: cpuRealtimePeriod,
+ CPURealtimeRuntime: cpuRealtimeRuntime,
+ }
+}
+
+// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point.
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo {
+ mountPoint, ok := cgMounts["blkio"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Unable to find blkio cgroup in mounts")
+ }
+ return cgroupBlkioInfo{}
+ }
+
+ weight := cgroupEnabled(mountPoint, "blkio.weight")
+ if !quiet && !weight {
+ logrus.Warn("Your kernel does not support cgroup blkio weight")
+ }
+
+ weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device")
+ if !quiet && !weightDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio weight_device")
+ }
+
+ readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device")
+ if !quiet && !readBpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device")
+ }
+
+ writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device")
+ if !quiet && !writeBpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device")
+ }
+ readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device")
+ if !quiet && !readIOpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device")
+ }
+
+ writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device")
+ if !quiet && !writeIOpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device")
+ }
+ return cgroupBlkioInfo{
+ BlkioWeight: weight,
+ BlkioWeightDevice: weightDevice,
+ BlkioReadBpsDevice: readBpsDevice,
+ BlkioWriteBpsDevice: writeBpsDevice,
+ BlkioReadIOpsDevice: readIOpsDevice,
+ BlkioWriteIOpsDevice: writeIOpsDevice,
+ }
+}
+
+// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point.
+func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo {
+ mountPoint, ok := cgMounts["cpuset"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Unable to find cpuset cgroup in mounts")
+ }
+ return cgroupCpusetInfo{}
+ }
+
+ cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus"))
+ if err != nil {
+ return cgroupCpusetInfo{}
+ }
+
+ mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems"))
+ if err != nil {
+ return cgroupCpusetInfo{}
+ }
+
+ return cgroupCpusetInfo{
+ Cpuset: true,
+ Cpus: strings.TrimSpace(string(cpus)),
+ Mems: strings.TrimSpace(string(mems)),
+ }
+}
+
+// checkCgroupPids reads the pids information from the pids cgroup mount point.
+func checkCgroupPids(quiet bool) cgroupPids {
+ _, err := cgroups.FindCgroupMountpoint("", "pids")
+ if err != nil {
+ if !quiet {
+ logrus.Warn(err)
+ }
+ return cgroupPids{}
+ }
+
+ return cgroupPids{
+ PidsLimit: true,
+ }
+}
+
+func cgroupEnabled(mountPoint, name string) bool {
+ _, err := os.Stat(path.Join(mountPoint, name))
+ return err == nil
+}
+
+func readProcBool(path string) bool {
+ val, err := ioutil.ReadFile(path)
+ if err != nil {
+ return false
+ }
+ return strings.TrimSpace(string(val)) == "1"
+}
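
The new pkg/sysinfo package is queried like the docker original it was copied from. A short caller sketch, assuming the exported fields (MemoryLimit, SwapLimit, PidsLimit, and so on) are promoted through the embedded cgroup structs as in docker's sysinfo.go:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/sysinfo"
)

func main() {
	// quiet=false logs a warning for every cgroup feature the kernel lacks.
	info := sysinfo.New(false)

	fmt.Println("memory limit support:", info.MemoryLimit)
	fmt.Println("swap limit support:", info.SwapLimit)
	fmt.Println("pids limit support:", info.PidsLimit)
	fmt.Println("seccomp available:", info.Seccomp)
	fmt.Println("apparmor available:", info.AppArmor)
}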
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go~
index 471f786a7..471f786a7 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go
+++ b/pkg/sysinfo/sysinfo_linux.go~
diff --git a/pkg/sysinfo/sysinfo_linux_test.go b/pkg/sysinfo/sysinfo_linux_test.go
new file mode 100644
index 000000000..860784f2a
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_linux_test.go
@@ -0,0 +1,104 @@
+package sysinfo
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sys/unix"
+)
+
+func TestReadProcBool(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc")
+ require.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ procFile := filepath.Join(tmpDir, "read-proc-bool")
+ err = ioutil.WriteFile(procFile, []byte("1"), 0644)
+ require.NoError(t, err)
+
+ if !readProcBool(procFile) {
+ t.Fatal("expected proc bool to be true, got false")
+ }
+
+ if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if readProcBool(procFile) {
+ t.Fatal("expected proc bool to be false, got true")
+ }
+
+ if readProcBool(path.Join(tmpDir, "no-exist")) {
+ t.Fatal("should be false for non-existent entry")
+ }
+
+}
+
+func TestCgroupEnabled(t *testing.T) {
+ cgroupDir, err := ioutil.TempDir("", "cgroup-test")
+ require.NoError(t, err)
+ defer os.RemoveAll(cgroupDir)
+
+ if cgroupEnabled(cgroupDir, "test") {
+ t.Fatal("cgroupEnabled should be false")
+ }
+
+ err = ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644)
+ require.NoError(t, err)
+
+ if !cgroupEnabled(cgroupDir, "test") {
+ t.Fatal("cgroupEnabled should be true")
+ }
+}
+
+func TestNew(t *testing.T) {
+ sysInfo := New(false)
+ require.NotNil(t, sysInfo)
+ checkSysInfo(t, sysInfo)
+
+ sysInfo = New(true)
+ require.NotNil(t, sysInfo)
+ checkSysInfo(t, sysInfo)
+}
+
+func checkSysInfo(t *testing.T, sysInfo *SysInfo) {
+ // If Seccomp is supported via CONFIG_SECCOMP, sysInfo.Seccomp must be true; otherwise it must be false.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ require.True(t, sysInfo.Seccomp)
+ }
+ } else {
+ require.False(t, sysInfo.Seccomp)
+ }
+}
+
+func TestNewAppArmorEnabled(t *testing.T) {
+ // If AppArmor is supported on this host, sysInfo.AppArmor must be true.
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); err != nil {
+ t.Skip("AppArmor must be enabled")
+ }
+
+ sysInfo := New(true)
+ require.True(t, sysInfo.AppArmor)
+}
+
+func TestNewAppArmorDisabled(t *testing.T) {
+ // If AppArmor is not supported on this host, sysInfo.AppArmor must be false.
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) {
+ t.Skip("AppArmor must be disabled")
+ }
+
+ sysInfo := New(true)
+ require.False(t, sysInfo.AppArmor)
+}
+
+func TestNumCPU(t *testing.T) {
+ cpuNumbers := NumCPU()
+ if cpuNumbers <= 0 {
+ t.Fatal("CPU returned must be greater than zero")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go b/pkg/sysinfo/sysinfo_solaris.go
index c858d57e0..c858d57e0 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go
+++ b/pkg/sysinfo/sysinfo_solaris.go
diff --git a/pkg/sysinfo/sysinfo_test.go b/pkg/sysinfo/sysinfo_test.go
new file mode 100644
index 000000000..b61fbcf54
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_test.go
@@ -0,0 +1,26 @@
+package sysinfo
+
+import "testing"
+
+func TestIsCpusetListAvailable(t *testing.T) {
+ cases := []struct {
+ provided string
+ available string
+ res bool
+ err bool
+ }{
+ {"1", "0-4", true, false},
+ {"01,3", "0-4", true, false},
+ {"", "0-7", true, false},
+ {"1--42", "0-7", false, true},
+ {"1-42", "00-1,8,,9", false, true},
+ {"1,41-42", "43,45", false, false},
+ {"0-3", "", false, false},
+ }
+ for _, c := range cases {
+ r, err := isCpusetListAvailable(c.provided, c.available)
+ if (c.err && err == nil) && r != c.res {
+ t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/pkg/sysinfo/sysinfo_unix.go
index 45f3ef1c6..45f3ef1c6 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go
+++ b/pkg/sysinfo/sysinfo_unix.go
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
index 4e6255bc5..4e6255bc5 100644
--- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go
+++ b/pkg/sysinfo/sysinfo_windows.go
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index cf366b197..a0b16a254 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -87,6 +87,18 @@ var _ = Describe("Podman run", func() {
Expect(session.ExitCode()).To(Equal(0))
})
+ It("podman run a container with --init", func() {
+ session := podmanTest.Podman([]string{"run", "--init", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
+ It("podman run a container with --init and --init-path", func() {
+ session := podmanTest.Podman([]string{"run", "--init", "--init-path", "/usr/libexec/podman/catatonit", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
It("podman run seccomp test", func() {
jsonFile := filepath.Join(podmanTest.TempDir, "seccomp.json")
in := []byte(`{"defaultAction":"SCMP_ACT_ALLOW","syscalls":[{"name":"getcwd","action":"SCMP_ACT_ERRNO"}]}`)
diff --git a/troubleshooting.md b/troubleshooting.md
index 4649cccb3..561562193 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -86,6 +86,9 @@ could not get runtime: database run root /run/user/1000/run does not match our r
#### Solution
+This problem has been fixed in Podman release 0.12.1, and it is recommended
+that you upgrade to that version. If that is not possible, use the following procedure.
+
To work around the new default path, we can manually set the path Podman is
expecting in a configuration file.
@@ -101,7 +104,7 @@ the first path in the error message Podman gave (in this case,
`/run/user/1000/run`).
---
-### 4) rootless containers cannot ping hosts
+### 5) rootless containers cannot ping hosts
When using the ping command from a non-root container, the command may
fail because of a lack of privileges.
@@ -128,9 +131,10 @@ To change its value you can use something like: `sysctl -w
To make the change persistent, you'll need to add a file in
`/etc/sysctl.d` that contains `net.ipv4.ping_group_range=0 $MAX_UID`.
-### 5) Build hangs when the Dockerfile contains the useradd command
+---
+### 6) Build hangs when the Dockerfile contains the useradd command
-When the Dockerfile contains a command like RUN useradd -u 99999000 -g users newuser the build can hang.
+When the Dockerfile contains a command like `RUN useradd -u 99999000 -g users newuser` the build can hang.
#### Symptom
diff --git a/vendor.conf b/vendor.conf
index 1ef9ada84..9af218d15 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -11,8 +11,8 @@ github.com/containerd/cgroups 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
github.com/containernetworking/plugins 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1
-github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
-github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
+github.com/containers/image f0cbc16b444d729362c78244c715b45bf4019b71
+github.com/containers/storage v1.4
github.com/containers/psgo dc0bc9fac5b715034c4310ed4d795b3182360842
github.com/coreos/go-systemd v14
github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c
@@ -46,9 +46,9 @@ github.com/kr/pty v1.0.0
github.com/mattn/go-runewidth v0.0.1
github.com/mistifyio/go-zfs v2.1.1
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
-github.com/opencontainers/go-digest v1.0.0-rc0
+github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
-github.com/opencontainers/runc b4e2ecb452d9ee4381137cc0a7e6715b96bed6de
+github.com/opencontainers/runc bbb17efcb4c0ab986407812a31ba333a7450064c
github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce
github.com/opencontainers/runtime-tools master
github.com/opencontainers/selinux 51c6c0a5dbc675792e953298cb9871819d6f9bb8
@@ -78,7 +78,7 @@ golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
golang.org/x/sync master
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
-gopkg.in/cheggaaa/pb.v1 v1.0.7
+gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/inf.v0 v0.9.0
gopkg.in/mgo.v2 v2
gopkg.in/square/go-jose.v2 v2.1.3
@@ -92,7 +92,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master
github.com/varlink/go master
-github.com/containers/buildah dd0f4f1b1eb49b841179049ac498e4b0f874b462
+github.com/containers/buildah a4200ae6b590c4b08b223e45513b1894636d1d02
github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master
@@ -101,3 +101,6 @@ github.com/mailru/easyjson 03f2033d19d5860aef995fe360ac7d395cd8ce65
github.com/coreos/go-iptables 25d087f3cffd9aedc0c2b7eff25f23cbf3c20fe1
github.com/google/shlex c34317bd91bf98fab745d77b03933cf8769299fe
github.com/pkg/profile v1.2.1
+github.com/klauspost/pgzip v1.2.1
+github.com/klauspost/compress v1.4.1
+github.com/klauspost/cpuid v1.2.0
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index dfdc33a22..e369dc407 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -6,10 +6,8 @@ import (
"path/filepath"
cp "github.com/containers/image/copy"
- "github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/containers/libpod/pkg/rootless"
- "github.com/sirupsen/logrus"
)
const (
@@ -34,12 +32,6 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference
}
}
- sourceInsecure, err := isReferenceInsecure(sourceReference, sourceCtx)
- if err != nil {
- logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(sourceReference), err)
- } else if sourceInsecure {
- sourceCtx.OCIInsecureSkipTLSVerify = true
- }
destinationCtx := &types.SystemContext{}
if destinationSystemContext != nil {
@@ -51,12 +43,6 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference
}
}
}
- destinationInsecure, err := isReferenceInsecure(destinationReference, destinationCtx)
- if err != nil {
- logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(destinationReference), err)
- } else if destinationInsecure {
- destinationCtx.OCIInsecureSkipTLSVerify = true
- }
return &cp.Options{
ReportWriter: reportWriter,
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 3609694f6..114b8ca37 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -474,9 +474,6 @@ func (b *Builder) Hostname() string {
// Note: this setting is not present in the OCIv1 image format, so it is
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetHostname(name string) {
- if name != "" && b.Format != Dockerv2ImageManifest {
- logrus.Errorf("HOSTNAME is not supported for OCI image format, hostname %s will be ignored. Must use `docker` format", name)
- }
b.Docker.Config.Hostname = name
}
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 163d34269..47842db7d 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -557,6 +557,10 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B
return nil, nil
}
+func (i *containerImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
if blob.Digest == i.configDigest {
logrus.Debugf("start reading config")
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 2681bc198..d838260e7 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -1307,7 +1307,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
var imageRef reference.Canonical
imageID := ""
- if !b.layers && !b.noCache {
+
+ // Check whether we have a one-line Dockerfile (which makes layers
+ // irrelevant) or the user told us to ignore layers.
+ ignoreLayers := (len(stages) < 2 && len(stages[0].Node.Children) < 2) || (!b.layers && !b.noCache)
+
+ if ignoreLayers {
imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
if err != nil {
return "", nil, err
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
index edb5837db..6feedf6a5 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -131,6 +131,11 @@ func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
var timeIsGreater bool
+ // the Walk below doesn't work if rootdir and path are equal
+ if rootdir == path {
+ return false, nil
+ }
+
// Convert historyTime from string to time.Time for comparison
histTime, err := time.Parse(time.RFC3339Nano, historyTime)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
index 63022f15d..ae55316b0 100644
--- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -218,6 +218,10 @@ func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *diges
return s.source.GetManifest(ctx, instanceDigest)
}
+func (s *blobCacheSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
present, size, err := s.reference.HasBlob(blobinfo)
if err != nil {
@@ -398,6 +402,10 @@ func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os
}
}
+func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
var tempfile *os.File
var err error
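
HasThreadSafeGetBlob and HasThreadSafePutBlob gate the parallel layer copy added to containers/image further down: copies only run concurrently when both the source and the destination report thread safety. A self-contained sketch of the bounded-parallelism pattern copy.go uses; the layer names and the copy body are placeholders:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	layers := []string{"a", "b", "c", "d", "e", "f", "g", "h"}

	// Mirrors copy.go: a weight of 6 when both ends are thread-safe,
	// a weight of 1 (fully serialized) otherwise.
	threadSafe := true
	weight := int64(1)
	if threadSafe {
		weight = 6
	}
	sem := semaphore.NewWeighted(weight)

	var wg sync.WaitGroup
	wg.Add(len(layers))
	results := make([]string, len(layers)) // per-index writes need no mutex

	for i, layer := range layers {
		if err := sem.Acquire(ctx, 1); err != nil {
			panic(err) // only fails if ctx is cancelled
		}
		go func(i int, layer string) {
			defer wg.Done()
			defer sem.Release(1)
			results[i] = "copied " + layer // stand-in for copyLayer
		}(i, layer)
	}

	wg.Wait()
	fmt.Println(results)
}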
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index e3a4fe62a..dc1b2bc69 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -148,6 +148,10 @@ var (
Name: "loglevel",
Usage: "adjust logging level (range from -2 to 3)",
},
+ cli.StringFlag{
+ Name: "platform",
+ Usage: "CLI compatibility: no action or effect",
+ },
cli.BoolTFlag{
Name: "pull",
Usage: "pull the image if not present",
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 5d2cd6a32..a0627f489 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -26,6 +26,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec"
+ "github.com/containers/storage/pkg/stringid"
units "github.com/docker/go-units"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -944,10 +945,25 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions)
g.SetHostname(options.Hostname)
} else if b.Hostname() != "" {
g.SetHostname(b.Hostname())
+ } else {
+ g.SetHostname(stringid.TruncateID(b.ContainerID))
}
} else {
g.SetHostname("")
}
+
+ found := false
+ spec := g.Spec()
+ for i := range spec.Process.Env {
+ if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
+ }
+
return configureNetwork, configureNetworks, nil
}
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/unshare/unshare.c
index 6873e49c2..3865e414f 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.c
+++ b/vendor/github.com/containers/buildah/unshare/unshare.c
@@ -79,7 +79,8 @@ void _buildah_unshare(void)
pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe");
if (pidfd != -1) {
snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
- if (write(pidfd, buf, strlen(buf)) != strlen(buf)) {
+ ssize_t size = write(pidfd, buf, strlen(buf));
+ if (size < 0 || (size_t)size != strlen(buf)) {
fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
_exit(1);
}
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 66a4e535a..5dadec7c2 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -173,24 +173,6 @@ func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
}
}
-// isRegistryInsecure checks if the named registry is marked as not secure
-func isRegistryInsecure(registry string, sc *types.SystemContext) (bool, error) {
- reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
- if err != nil {
- return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc))
- }
- if reginfo != nil {
- if reginfo.Insecure {
- logrus.Debugf("registry %q is marked insecure in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
- } else {
- logrus.Debugf("registry %q is not marked insecure in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc))
- }
- return reginfo.Insecure, nil
- }
- logrus.Debugf("registry %q is not listed in registries configuration %q, assuming it's secure", registry, sysregistries.RegistriesConfPath(sc))
- return false, nil
-}
-
// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
@@ -221,11 +203,6 @@ func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, wha
return false, nil
}
-// isReferenceInsecure checks if the registry part of a reference is insecure
-func isReferenceInsecure(ref types.ImageReference, sc *types.SystemContext) (bool, error) {
- return isReferenceSomething(ref, sc, isRegistryInsecure)
-}
-
// isReferenceBlocked checks if the registry part of a reference is blocked
func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool, error) {
if ref != nil && ref.Transport() != nil {
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 427c8db28..f4cac522e 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -395,57 +395,11 @@ func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.Linux
// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
- nonDigitsToWhitespace := func(r rune) rune {
- if strings.IndexRune("0123456789", r) == -1 {
- return ' '
- } else {
- return r
- }
- }
- parseTriple := func(spec []string) (container, host, size uint32, err error) {
- cid, err := strconv.ParseUint(spec[0], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
- }
- hid, err := strconv.ParseUint(spec[1], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
- }
- sz, err := strconv.ParseUint(spec[2], 10, 32)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
- }
- return uint32(cid), uint32(hid), uint32(sz), nil
- }
- parseIDMap := func(mapSpec []string, mapSetting string) (idmap []idtools.IDMap, err error) {
- for _, idMapSpec := range mapSpec {
- idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
- if len(idSpec)%3 != 0 {
- return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
- }
- for i := range idSpec {
- if i%3 != 0 {
- continue
- }
- cid, hid, size, err := parseTriple(idSpec[i : i+3])
- if err != nil {
- return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
- }
- mapping := idtools.IDMap{
- ContainerID: int(cid),
- HostID: int(hid),
- Size: int(size),
- }
- idmap = append(idmap, mapping)
- }
- }
- return idmap, nil
- }
- uid, err := parseIDMap(uidmap, "userns-uid-map")
+ uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
if err != nil {
return nil, nil, err
}
- gid, err := parseIDMap(gidmap, "userns-gid-map")
+ gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
if err != nil {
return nil, nil, err
}
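
The hand-rolled triple parser above is replaced by idtools.ParseIDMap from containers/storage. The accepted format is unchanged: each entry is a container-ID:host-ID:size triple. A small sketch; the mapping values are arbitrary examples:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// The second argument only names the originating flag for error messages.
	uidmap, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "userns-uid-map")
	if err != nil {
		panic(err)
	}
	for _, m := range uidmap {
		fmt.Printf("container %d -> host %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
	}
}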
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index 61325114c..ee8192935 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,10 +3,10 @@ github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
+github.com/containers/image 0c6cc8e1420001ae39fa89d308b3b3bc5ee81c57
github.com/boltdb/bolt master
-github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e
-github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
+github.com/containers/libpod c8eaf59d5f4bec249db8134c6a9fcfbcac792519
+github.com/containers/storage 60a692f7ce891feb91ce0eda87bd06bfd5651dff
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@@ -32,7 +32,7 @@ github.com/mistifyio/go-zfs master
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme master
github.com/Nvveen/Gotty master
-github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
+github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master
github.com/opencontainers/runtime-spec v1.0.0
@@ -55,10 +55,14 @@ github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonschema master
golang.org/x/crypto master
golang.org/x/net master
+golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys master
golang.org/x/text master
-gopkg.in/cheggaaa/pb.v1 v1.0.13
+gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
k8s.io/apimachinery master
k8s.io/client-go master
k8s.io/kubernetes master
+github.com/klauspost/pgzip v1.2.1
+github.com/klauspost/compress v1.4.1
+github.com/klauspost/cpuid v1.2.0
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 013080e8d..89c7e580f 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -2,7 +2,6 @@ package copy
import (
"bytes"
- "compress/gzip"
"context"
"fmt"
"io"
@@ -10,6 +9,7 @@ import (
"reflect"
"runtime"
"strings"
+ "sync"
"time"
"github.com/containers/image/image"
@@ -18,9 +18,11 @@ import (
"github.com/containers/image/signature"
"github.com/containers/image/transports"
"github.com/containers/image/types"
+ "github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
pb "gopkg.in/cheggaaa/pb.v1"
)
@@ -32,6 +34,10 @@ type digestingReader struct {
validationSucceeded bool
}
+// maxParallelDownloads is used to limit the maximum number of parallel
+// downloads. Let's follow Firefox by limiting it to 6.
+var maxParallelDownloads = 6
+
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
@@ -81,6 +87,7 @@ type copier struct {
progressInterval time.Duration
progress chan types.ProgressProperties
blobInfoCache types.BlobInfoCache
+ copyInParallel bool
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -145,12 +152,14 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
}()
+ copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
c := &copier{
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressInterval: options.ProgressInterval,
progress: options.Progress,
+ copyInParallel: copyInParallel,
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
@@ -380,11 +389,26 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
return nil
}
+// shortDigest returns the first 12 characters of the digest.
+func shortDigest(d digest.Digest) string {
+ return d.Encoded()[:12]
+}
+
+// createProgressBar creates a pb.ProgressBar.
+func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar {
+ bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
+ bar.SetMaxWidth(80)
+ bar.ShowTimeLeft = false
+ bar.ShowPercent = false
+ bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest)))
+ bar.Output = writer
+ return bar
+}
+
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos()
- destInfos := []types.BlobInfo{}
- diffIDs := []digest.Digest{}
+ numLayers := len(srcInfos)
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil {
return err
@@ -397,30 +421,83 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos = updatedSrcInfos
srcInfosUpdated = true
}
- for _, srcLayer := range srcInfos {
- var (
- destInfo types.BlobInfo
- diffID digest.Digest
- err error
- )
+
+ type copyLayerData struct {
+ destInfo types.BlobInfo
+ diffID digest.Digest
+ err error
+ }
+
+ // copyGroup is used to wait until all layers have been copied.
+ copyGroup := sync.WaitGroup{}
+ copyGroup.Add(numLayers)
+ // copySemaphore is used to limit the number of parallel downloads to
+ // avoid malicious images causing trouble and to be nice to servers.
+ var copySemaphore *semaphore.Weighted
+ if ic.c.copyInParallel {
+ copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
+ } else {
+ copySemaphore = semaphore.NewWeighted(int64(1))
+ }
+
+ data := make([]copyLayerData, numLayers)
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) {
+ defer bar.Finish()
+ defer copySemaphore.Release(1)
+ defer copyGroup.Done()
+ cld := copyLayerData{}
if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are, currently, needed only when converting from schema1.
// In which case src.LayerInfos will not have URLs because schema1
// does not support them.
if ic.diffIDsAreNeeded {
- return errors.New("getting DiffID for foreign layers is unimplemented")
+ cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
+ bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest)))
+ bar.Finish()
+ } else {
+ cld.destInfo = srcLayer
+ logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
+ bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest)))
+ bar.Add64(bar.Total)
+ bar.Finish()
}
- destInfo = srcLayer
- ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name())
} else {
- destInfo, diffID, err = ic.copyLayer(ctx, srcLayer)
- if err != nil {
- return err
- }
+ cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
}
- destInfos = append(destInfos, destInfo)
- diffIDs = append(diffIDs, diffID)
+ data[index] = cld
}
+
+ progressBars := make([]*pb.ProgressBar, numLayers)
+ for i, srcInfo := range srcInfos {
+ bar := createProgressBar(srcInfo, "blob", nil)
+ progressBars[i] = bar
+ }
+
+ progressPool := pb.NewPool(progressBars...)
+ progressPool.Output = ic.c.reportWriter
+ if err := progressPool.Start(); err != nil {
+ return errors.Wrapf(err, "error creating progress-bar pool")
+ }
+
+ for i, srcLayer := range srcInfos {
+ copySemaphore.Acquire(ctx, 1)
+ go copyLayerHelper(i, srcLayer, progressBars[i])
+ }
+
+ destInfos := make([]types.BlobInfo, numLayers)
+ diffIDs := make([]digest.Digest, numLayers)
+
+ copyGroup.Wait()
+ progressPool.Stop()
+
+ for i, cld := range data {
+ if cld.err != nil {
+ return cld.err
+ }
+ destInfos[i] = cld.destInfo
+ diffIDs[i] = cld.diffID
+ }
+
ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
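The hunk above combines a sync.WaitGroup sized to the layer count with a golang.org/x/sync/semaphore.Weighted (vendored later in this diff) that caps concurrency at maxParallelDownloads. A minimal, self-contained sketch of that pattern; the task names and counts are illustrative, not from the patch:

    package main

    import (
        "context"
        "fmt"
        "sync"

        "golang.org/x/sync/semaphore"
    )

    func main() {
        const numTasks = 10
        const maxParallel = 6 // mirrors maxParallelDownloads

        var wg sync.WaitGroup
        wg.Add(numTasks)
        sem := semaphore.NewWeighted(maxParallel)
        results := make([]string, numTasks) // one slot per task, so no mutex is needed

        for i := 0; i < numTasks; i++ {
            // Acquire before spawning, so at most maxParallel goroutines run at once.
            if err := sem.Acquire(context.Background(), 1); err != nil {
                panic(err)
            }
            go func(i int) {
                defer wg.Done()
                defer sem.Release(1)
                results[i] = fmt.Sprintf("layer %d copied", i)
            }(i)
        }

        wg.Wait() // errors are inspected only after all workers finish, as copyLayers does
        for _, r := range results {
            fmt.Println(r)
        }
    }

Collecting per-index results and checking errors after Wait keeps the worker goroutines free of shared mutable state, which is the same design choice copyLayers makes with its data slice.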
@@ -487,12 +564,14 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
- c.Printf("Copying config %s\n", srcInfo.Digest)
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
}
- destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true)
+ bar := createProgressBar(srcInfo, "config", c.reportWriter)
+ defer bar.Finish()
+ bar.Start()
+ destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
if err != nil {
return err
}
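createProgressBar is driven two ways: standalone with bar.Start() here in copyConfig, and through pb.NewPool for the layer copies above. A small sketch of the gopkg.in/cheggaaa/pb.v1 pool API (at v1.0.27 as pinned further down); the digests and counts are made up:

    package main

    import (
        "time"

        pb "gopkg.in/cheggaaa/pb.v1"
    )

    func main() {
        first := pb.New(64).SetUnits(pb.U_BYTES)
        first.Prefix("Copying blob 0123456789ab:")
        second := pb.New(128).SetUnits(pb.U_BYTES)
        second.Prefix("Copying blob ba9876543210:")

        // A pool multiplexes several bars onto one terminal, which is what
        // keeps the parallel layer copies legible.
        pool := pb.NewPool(first, second)
        if err := pool.Start(); err != nil {
            panic(err)
        }
        for i := 0; i < 64; i++ {
            first.Add(1)
            second.Add(2)
            time.Sleep(5 * time.Millisecond)
        }
        first.Finish()
        second.Finish()
        pool.Stop()
    }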
@@ -512,7 +591,7 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
@@ -523,13 +602,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
}
if reused {
- ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest)))
+ bar.Add64(bar.Total)
+ bar.Finish()
return blobInfo, cachedDiffID, nil
}
}
// Fallback: copy the layer, computing the diffID if we need to do so
- ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
@@ -537,7 +617,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
defer srcStream.Close()
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
- diffIDIsNeeded)
+ diffIDIsNeeded, bar)
if err != nil {
return types.BlobInfo{}, "", err
}
@@ -565,7 +645,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
@@ -589,7 +669,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
return pipeWriter
}
}
- blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success
+ blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -626,7 +706,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool) (types.BlobInfo, error) {
+ canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
@@ -649,16 +729,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
isCompressed := decompressor != nil
-
- // === Report progress using a pb.Reader.
- bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
- bar.Output = c.reportWriter
- bar.SetMaxWidth(80)
- bar.ShowTimeLeft = false
- bar.ShowPercent = false
- bar.Start()
destStream = bar.NewProxyReader(destStream)
- defer bar.Finish()
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
@@ -761,7 +832,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
}()
- zipper := gzip.NewWriter(dest)
+ zipper := pgzip.NewWriter(dest)
defer zipper.Close()
_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index d75c195b2..4b2ab022e 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -124,6 +124,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dirImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 3625def80..59b625397 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -48,6 +48,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, manifest.GuessMIMEType(m), err
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *dirImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 7f55dbe7f..23d2ac70f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -13,6 +13,7 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync"
"time"
"github.com/containers/image/docker/reference"
@@ -84,6 +85,7 @@ type dockerClient struct {
registry string
client *http.Client
insecureSkipTLSVerify bool
+
// The following members are not set by newDockerClient and must be set by callers if needed.
username string
password string
@@ -95,8 +97,13 @@ type dockerClient struct {
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge
supportsSignatures bool
- // Private state for setupRequestAuth
- tokenCache map[string]bearerToken
+
+ // Private state for setupRequestAuth (key: string, value: bearerToken)
+ tokenCache sync.Map
+ // detectPropertiesError caches the initial error.
+ detectPropertiesError error
+ // detectPropertiesOnce is used to execute detectProperties() at most once in makeRequest().
+ detectPropertiesOnce sync.Once
}
type authScope struct {
@@ -262,7 +269,6 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
registry: registry,
client: &http.Client{Transport: tr},
insecureSkipTLSVerify: skipVerify,
- tokenCache: map[string]bearerToken{},
}, nil
}
@@ -473,14 +479,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
scopes = append(scopes, *c.extraScope)
}
- token, ok := c.tokenCache[cacheKey]
- if !ok || time.Now().After(token.expirationTime) {
+ var token bearerToken
+ t, inCache := c.tokenCache.Load(cacheKey)
+ if inCache {
+ token = t.(bearerToken)
+ }
+ if !inCache || time.Now().After(token.expirationTime) {
t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
token = *t
- c.tokenCache[cacheKey] = token
+ c.tokenCache.Store(cacheKey, token)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
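Replacing the plain map with sync.Map makes the token cache safe for the concurrent requests that parallel blob copies now issue. A standalone sketch of the load/assert/store dance used above; the bearerToken fields and the fetch callback are illustrative:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type bearerToken struct {
        Token          string
        expirationTime time.Time
    }

    var tokenCache sync.Map // key: string, value: bearerToken

    func cachedToken(key string, fetch func() bearerToken) bearerToken {
        if t, ok := tokenCache.Load(key); ok {
            token := t.(bearerToken) // sync.Map stores interface{}, so assert back
            if time.Now().Before(token.expirationTime) {
                return token
            }
        }
        token := fetch() // missing or expired: fetch a fresh one and overwrite
        tokenCache.Store(key, token)
        return token
    }

    func main() {
        tok := cachedToken("repo:pull", func() bearerToken {
            return bearerToken{Token: "abc", expirationTime: time.Now().Add(time.Minute)}
        })
        fmt.Println(tok.Token)
    }

As in the patch, two goroutines may still race to fetch the same token; the cache only guarantees a usable value, not single-flight fetching.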
@@ -545,9 +555,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
return newBearerTokenFromJSONBlob(tokenBlob)
}
-// detectProperties detects various properties of the registry.
-// See the dockerClient documentation for members which are affected by this.
-func (c *dockerClient) detectProperties(ctx context.Context) error {
+// detectPropertiesHelper performs the actual work of detectProperties;
+// detectProperties executes it at most once.
+func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
if c.scheme != "" {
return nil
}
@@ -604,6 +614,13 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return err
}
+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+ c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
+ return c.detectPropertiesError
+}
+
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
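The helper split exists so that sync.Once can cache both the side effects and the error of the first probe. The idiom in isolation; the type and method names here are illustrative:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    type client struct {
        detectOnce sync.Once
        detectErr  error
    }

    func (c *client) detectHelper() error {
        fmt.Println("probing registry...") // expensive work, runs at most once
        return errors.New("probe failed")
    }

    // detect mirrors detectProperties: the first caller runs the helper,
    // and every caller gets the cached error.
    func (c *client) detect() error {
        c.detectOnce.Do(func() { c.detectErr = c.detectHelper() })
        return c.detectErr
    }

    func main() {
        c := &client{}
        fmt.Println(c.detect()) // probes
        fmt.Println(c.detect()) // cached result, no second probe
    }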
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 2f471f648..973d160d0 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -111,6 +111,11 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
return len(p), nil
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index fbed6297f..c88ff2f34 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -161,6 +161,11 @@ func getBlobSize(resp *http.Response) int64 {
return size
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
+ return true
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index ad8a48a03..5f30eddbc 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -82,6 +82,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index d94ed9783..889e5f8e8 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -397,6 +397,11 @@ func (r uncompressedReadCloser) Close() error {
return res
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *Source) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go
index 3997ac2ee..9571c37e2 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go
@@ -77,6 +77,11 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference()
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index 084d818f7..ca74f950b 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -76,6 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index b5a6e08f8..db102184d 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -107,6 +107,11 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ociImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index 086a7040d..cc536f69e 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -92,6 +92,11 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index 0cce1e6c7..814c3eea1 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -211,6 +211,11 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
return s.docker.GetManifest(ctx, instanceDigest)
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -378,6 +383,11 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.docker.IgnoresEmbeddedDockerReference()
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index 064898948..d69f4fa33 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -4,7 +4,6 @@ package ostree
import (
"bytes"
- "compress/gzip"
"context"
"encoding/base64"
"encoding/json"
@@ -24,6 +23,7 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
+ "github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
@@ -132,6 +132,11 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
+ return false
+}
+
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
@@ -249,7 +254,7 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
}
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
- mfz := gzip.NewWriter(output)
+ mfz := pgzip.NewWriter(output)
defer mfz.Close()
metaPacker := storage.NewJSONPacker(mfz)
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index e73cae198..df432c9f3 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -4,7 +4,6 @@ package ostree
import (
"bytes"
- "compress/gzip"
"context"
"encoding/base64"
"fmt"
@@ -17,6 +16,7 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/ioutils"
+ "github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/pkg/errors"
@@ -255,6 +255,11 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -304,7 +309,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
}
mf := bytes.NewReader(tarsplit)
- mfz, err := gzip.NewReader(mf)
+ mfz, err := pgzip.NewReader(mf)
if err != nil {
return nil, 0, err
}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
index 6da9f2805..459ae5c06 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -54,7 +54,7 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
}
path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil {
- logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", err)
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
return NewMemoryCache()
}
diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go
index a39523e4f..aad2bfcf2 100644
--- a/vendor/github.com/containers/image/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/pkg/compression/compression.go
@@ -3,10 +3,10 @@ package compression
import (
"bytes"
"compress/bzip2"
- "compress/gzip"
"io"
"io/ioutil"
+ "github.com/klauspost/pgzip"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
@@ -18,7 +18,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
- return gzip.NewReader(r)
+ return pgzip.NewReader(r)
}
// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
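pgzip mirrors the compress/gzip API, so swapping the import is the whole change. A round-trip sketch against github.com/klauspost/pgzip (pinned at v1.2.1 further down in this diff):

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"

        "github.com/klauspost/pgzip"
    )

    func main() {
        var buf bytes.Buffer
        w := pgzip.NewWriter(&buf) // same signature as gzip.NewWriter
        w.Write([]byte("hello, parallel gzip"))
        w.Close()

        r, err := pgzip.NewReader(&buf) // same signature as gzip.NewReader
        if err != nil {
            panic(err)
        }
        defer r.Close()
        data, _ := ioutil.ReadAll(r)
        fmt.Println(string(data))
    }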
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
index afc7312d1..9e3e9cfe1 100644
--- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -318,8 +318,37 @@ func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, erro
return unqualified, nil
}
-// FindRegistry returns the Registry with the longest prefix for ref. If no
-// Registry prefixes the image, nil is returned.
+// refMatchesPrefix returns true iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchesPrefix(ref, prefix string) bool {
+ switch {
+ case len(ref) < len(prefix):
+ return false
+ case len(ref) == len(prefix):
+ return ref == prefix
+ case len(ref) > len(prefix):
+ if !strings.HasPrefix(ref, prefix) {
+ return false
+ }
+ c := ref[len(prefix)]
+ // This allows "example.com:5000" to match "example.com",
+ // which is unintended; that will get fixed eventually, DON'T RELY
+ // ON THE CURRENT BEHAVIOR.
+ return c == ':' || c == '/' || c == '@'
+ default:
+ panic("Internal error: impossible comparison outcome")
+ }
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
registries, err := GetRegistries(ctx)
if err != nil {
@@ -329,7 +358,7 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
reg := Registry{}
prefixLen := 0
for _, r := range registries {
- if strings.HasPrefix(ref, r.Prefix+"/") || ref == r.Prefix {
+ if refMatchesPrefix(ref, r.Prefix) {
length := len(r.Prefix)
if length > prefixLen {
reg = r
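A few concrete inputs make the boundary-character rule easier to see. The sketch below inlines a condensed copy of refMatchesPrefix (the default case stands in for the original's explicit third case and panic) and exercises it, including the documented example.com:5000 quirk:

    package main

    import (
        "fmt"
        "strings"
    )

    // condensed copy of refMatchesPrefix from the hunk above
    func refMatchesPrefix(ref, prefix string) bool {
        switch {
        case len(ref) < len(prefix):
            return false
        case len(ref) == len(prefix):
            return ref == prefix
        default:
            if !strings.HasPrefix(ref, prefix) {
                return false
            }
            c := ref[len(prefix)]
            return c == ':' || c == '/' || c == '@'
        }
    }

    func main() {
        fmt.Println(refMatchesPrefix("example.com/ns/repo", "example.com"))   // true: '/' follows the prefix
        fmt.Println(refMatchesPrefix("example.company.com/r", "example.com")) // false: 'p' follows
        fmt.Println(refMatchesPrefix("example.com", "example.com"))           // true: exact match
        fmt.Println(refMatchesPrefix("example.com:5000/r", "example.com"))    // true today; unintended, do not rely on it
    }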
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index bd6813119..b53fbdf6e 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -11,6 +11,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "sync"
"sync/atomic"
"github.com/containers/image/image"
@@ -47,6 +48,7 @@ type storageImageSource struct {
image *storage.Image
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
}
@@ -56,6 +58,7 @@ type storageImageDestination struct {
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
signatures []byte // Signature contents, temporary
+ putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
@@ -91,15 +94,20 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
}
// Reference returns the image reference that we used to find this image.
-func (s storageImageSource) Reference() types.ImageReference {
+func (s *storageImageSource) Reference() types.ImageReference {
return s.imageRef
}
// Close cleans up any resources we tied up while reading the image.
-func (s storageImageSource) Close() error {
+func (s *storageImageSource) Close() error {
return nil
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *storageImageSource) HasThreadSafeGetBlob() bool {
+ return true
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
@@ -137,8 +145,10 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
// just go ahead and use the first one every time.
+ s.getBlobMutex.Lock()
i := s.layerPosition[info.Digest]
s.layerPosition[info.Digest] = i + 1
+ s.getBlobMutex.Unlock()
if len(layers) > 0 {
layer = layers[i%len(layers)]
}
@@ -300,7 +310,7 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-func (s storageImageDestination) Reference() types.ImageReference {
+func (s *storageImageDestination) Reference() types.ImageReference {
return s.imageRef
}
@@ -309,7 +319,7 @@ func (s *storageImageDestination) Close() error {
return os.RemoveAll(s.directory)
}
-func (s storageImageDestination) DesiredLayerCompression() types.LayerCompression {
+func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
// We ultimately have to decompress layers to populate trees on disk,
// so callers shouldn't bother compressing them before handing them to
// us, if they're not already compressed.
@@ -320,6 +330,11 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (s *storageImageDestination) HasThreadSafePutBlob() bool {
+ return true
+}
+
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
@@ -370,9 +385,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return errorBlobInfo, ErrBlobSizeMismatch
}
// Record information about the blob.
+ s.putBlobMutex.Lock()
s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
s.fileSizes[hasher.Digest()] = counter.Count
s.filenames[hasher.Digest()] = filename
+ s.putBlobMutex.Unlock()
blobDigest := blobinfo.Digest
if blobDigest.Validate() != nil {
blobDigest = hasher.Digest()
@@ -398,6 +415,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ // lock the entire method as it executes fairly quickly
+ s.putBlobMutex.Lock()
+ defer s.putBlobMutex.Unlock()
if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
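With PutBlob now advertised as thread-safe, every write to the shared bookkeeping maps is bracketed by putBlobMutex while the per-blob I/O stays outside the lock. The shape of that pattern, with illustrative types:

    package main

    import (
        "fmt"
        "sync"
    )

    type dest struct {
        mu        sync.Mutex
        fileSizes map[string]int64
        filenames map[string]string
    }

    // record updates the shared maps under the mutex, as PutBlob does once a
    // blob has been written; the blob I/O itself can run concurrently because
    // it only touches per-call state.
    func (d *dest) record(digest, filename string, size int64) {
        d.mu.Lock()
        defer d.mu.Unlock()
        d.fileSizes[digest] = size
        d.filenames[digest] = filename
    }

    func main() {
        d := &dest{fileSizes: map[string]int64{}, filenames: map[string]string{}}
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                d.record(fmt.Sprintf("sha256:%04d", i), fmt.Sprintf("/tmp/blob-%d", i), int64(i))
            }(i)
        }
        wg.Wait()
        fmt.Println(len(d.filenames), "blobs recorded")
    }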
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index ee963b8d8..76e3e755f 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -2,7 +2,6 @@ package tarball
import (
"bytes"
- "compress/gzip"
"context"
"encoding/json"
"fmt"
@@ -14,7 +13,7 @@ import (
"time"
"github.com/containers/image/types"
-
+ "github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -77,7 +76,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
// Set up to digest the file after we maybe decompress it.
diffIDdigester := digest.Canonical.Digester()
- uncompressed, err := gzip.NewReader(reader)
+ uncompressed, err := pgzip.NewReader(reader)
if err == nil {
// It is compressed, so the diffID is the digest of the uncompressed version
reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
@@ -207,6 +206,11 @@ func (is *tarballImageSource) Close() error {
return nil
}
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index dda332776..9fdab2314 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -198,6 +198,8 @@ type ImageSource interface {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
+ // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+ HasThreadSafeGetBlob() bool
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@@ -264,6 +266,8 @@ type ImageDestination interface {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+ HasThreadSafePutBlob() bool
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
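Consumers combine the two new capability methods to decide whether parallel copying is safe, as the copier constructor does earlier in this diff. A minimal sketch with abbreviated interfaces and toy implementations; the concrete types here are stand-ins, not the real transports:

    package main

    import "fmt"

    type blobSource interface{ HasThreadSafeGetBlob() bool }
    type blobDestination interface{ HasThreadSafePutBlob() bool }

    type registrySrc struct{}

    func (registrySrc) HasThreadSafeGetBlob() bool { return true } // like dockerImageSource

    type dirDest struct{}

    func (dirDest) HasThreadSafePutBlob() bool { return false } // like dirImageDestination

    // parallelAllowed mirrors the check in Image(): both ends must opt in.
    func parallelAllowed(s blobSource, d blobDestination) bool {
        return s.HasThreadSafeGetBlob() && d.HasThreadSafePutBlob()
    }

    func main() {
        fmt.Println(parallelAllowed(registrySrc{}, dirDest{})) // false: the dir destination opts out
    }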
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 88537981a..bbbb1a458 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -16,7 +16,7 @@ github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
-github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
+github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
@@ -26,8 +26,9 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vbatts/tar-split v0.10.2
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
+golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
-gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
+gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master
@@ -36,7 +37,7 @@ github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master
github.com/syndtr/gocapability master
@@ -44,3 +45,6 @@ github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
github.com/boltdb/bolt master
+github.com/klauspost/pgzip v1.2.1
+github.com/klauspost/compress v1.4.1
+github.com/klauspost/cpuid v1.2.0
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index 53aa217c9..58abca477 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -26,7 +26,7 @@ type directLVMConfig struct {
var (
errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
- errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm")
+ errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm")
)
func validateLVMConfig(cfg directLVMConfig) error {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index df736c0a9..2455b6736 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"sync"
+ "syscall"
"github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
@@ -165,6 +166,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
+ patherr, ok := err.(*os.PathError)
+ if ok && patherr.Err == syscall.ENOSPC {
+ return nil, err
+ }
return nil, errors.Wrap(err, "kernel does not support overlay fs")
}
}
@@ -285,6 +290,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
exec.Command("modprobe", "overlay").Run()
layerDir, err := ioutil.TempDir(home, "compat")
+ if err != nil {
+ patherr, ok := err.(*os.PathError)
+ if ok && patherr.Err == syscall.ENOSPC {
+ return false, err
+ }
+ }
if err == nil {
// Check if reading the directory's contents populates the d_type field, which is required
// for proper operation of the overlay filesystem.
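Both overlay hunks use the same unwrap-and-compare idiom to distinguish "disk full" from "overlay unsupported"; errors.Is did not exist yet at this point, hence the manual type assertion. In isolation, with a made-up path:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "syscall"
    )

    func main() {
        _, err := ioutil.TempDir("/some/full/volume", "compat") // illustrative path
        if err != nil {
            if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.ENOSPC {
                fmt.Println("out of space: surface the real error instead of 'kernel does not support overlay fs'")
                return
            }
            fmt.Println("other failure:", err)
        }
    }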
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 0b532eb77..299d2f818 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -2,7 +2,6 @@ package storage
import (
"bytes"
- "compress/gzip"
"encoding/json"
"fmt"
"io"
@@ -20,6 +19,7 @@ import (
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/truncindex"
+ "github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
@@ -1055,7 +1055,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
defer tsfile.Close()
- decompressor, err := gzip.NewReader(tsfile)
+ decompressor, err := pgzip.NewReader(tsfile)
if err != nil {
return nil, err
}
@@ -1116,9 +1116,9 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
tsdata := bytes.Buffer{}
- compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed)
+ compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
if err != nil {
- compressor = gzip.NewWriter(&tsdata)
+ compressor = pgzip.NewWriter(&tsdata)
}
metadata := storage.NewJSONPacker(compressor)
uncompressed, err := archive.DecompressStream(defragmented)
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 8d6eaacf3..228d8bb82 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -5,7 +5,6 @@ import (
"bufio"
"bytes"
"compress/bzip2"
- "compress/gzip"
"fmt"
"io"
"io/ioutil"
@@ -22,6 +21,7 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/promise"
"github.com/containers/storage/pkg/system"
+ gzip "github.com/klauspost/pgzip"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)
@@ -499,6 +499,8 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Gid = ta.ChownOpts.GID
}
+ maybeTruncateHeaderModTime(hdr)
+
if ta.WhiteoutConverter != nil {
wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
if err != nil {
@@ -640,11 +642,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
var errors []string
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
- if err == syscall.ENOTSUP {
+ if err == syscall.ENOTSUP || (err == syscall.EPERM && inUserns) {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
// ENOTSUP should be emitted in that case, otherwise we still
- // bail.
+ // bail. We also ignore EPERM errors if we are running in a
+ // user namespace.
errors = append(errors, err.Error())
continue
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
index 22b8b48cc..7bc44a566 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
@@ -4,8 +4,19 @@ package archive
import (
"archive/tar"
+ "time"
)
func copyPassHeader(hdr *tar.Header) {
hdr.Format = tar.FormatPAX
}
+
+func maybeTruncateHeaderModTime(hdr *tar.Header) {
+ if hdr.Format == tar.FormatUnknown {
+ // one of the first things archive/tar does is round this
+ // value, possibly up, if the format isn't specified, while we
+ // are much better equipped to handle truncation when scanning
+ // for changes between the source and an extracted copy of this archive
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ }
+}
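Truncating the timestamp ourselves (instead of letting archive/tar round it, possibly up) keeps the stored value comparable with the on-disk file during change scans. A quick demonstration:

    package main

    import (
        "archive/tar"
        "fmt"
        "time"
    )

    func main() {
        hdr := &tar.Header{
            Name:    "f.txt",
            ModTime: time.Date(2018, 11, 1, 12, 0, 0, 600000000, time.UTC), // 0.6s past the second
        }
        if hdr.Format == tar.FormatUnknown {
            // round down, never up, matching maybeTruncateHeaderModTime
            hdr.ModTime = hdr.ModTime.Truncate(time.Second)
        }
        fmt.Println(hdr.ModTime) // 2018-11-01 12:00:00 +0000 UTC
    }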
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
index d10d595fa..d19811fdb 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
@@ -8,3 +8,6 @@ import (
func copyPassHeader(hdr *tar.Header) {
}
+
+func maybeTruncateHeaderModTime(hdr *tar.Header) {
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
index 9b8103e4d..2ba44b85d 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: pkg/archive/archive.go
+// source: ./pkg/archive/archive.go
package archive
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index e0dd1b92f..a166799c6 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -1070,7 +1070,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, read
}
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc)
if err != nil {
- return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", parentLayer.ID)
+ return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", layer.ID)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index fa52584d7..936321b12 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -23,3 +23,6 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
gotest.tools master
github.com/google/go-cmp master
+github.com/klauspost/pgzip v1.2.1
+github.com/klauspost/compress v1.4.1
+github.com/klauspost/cpuid v1.2.0
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
index 70f9c5564..495db809e 100644
--- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -13,7 +13,7 @@ import (
"os"
"path"
- "github.com/containers/storage/pkg/archive"
+ "github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
@@ -39,7 +39,7 @@ func main() {
if len(*flNewDir) == 0 {
var err error
- newDir, err = ioutil.TempDir("", "storage-test-newDir")
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
@@ -52,7 +52,7 @@ func main() {
}
if len(*flOldDir) == 0 {
- oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
+ oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go
new file mode 100644
index 000000000..32f22bb37
--- /dev/null
+++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go
@@ -0,0 +1,32 @@
+// +build ignore
+
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/profiles/seccomp"
+)
+
+// saves the default seccomp profile as a json file so people can use it as a
+// base for their own custom profiles
+func main() {
+ wd, err := os.Getwd()
+ if err != nil {
+ panic(err)
+ }
+ f := filepath.Join(wd, "default.json")
+
+ // write the default profile to the file
+ b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t")
+ if err != nil {
+ panic(err)
+ }
+
+ if err := ioutil.WriteFile(f, b, 0644); err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 000000000..280d54890
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,160 @@
+# compress
+
+This package is based on an optimized Deflate function, which is used by the gzip/zip/zlib packages.
+
+It offers slightly better compression at lower compression settings, and up to 3x faster encoding at the highest compression level.
+
+* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
+* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
+
+[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
+# changelog
+
+* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
+* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
+
+# usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import | new import |
+|--------------------|-----------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` |
+| `archive/zip` | `github.com/klauspost/compress/zip` |
+| `compress/flate` | `github.com/klauspost/compress/flate` |
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the standard godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
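+
+As a minimal sketch of the switch (the program below is illustrative, not from the package docs), a gzip round trip only needs the import path changed; `NewWriter` and `NewReader` behave exactly as in `compress/gzip`:
+
+```go
+package main
+
+import (
+	"bytes"
+	"io/ioutil"
+	"log"
+
+	// was: "compress/gzip"
+	"github.com/klauspost/compress/gzip"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Compress exactly as with the standard library.
+	zw := gzip.NewWriter(&buf)
+	if _, err := zw.Write([]byte("hello, hello, hello gopher")); err != nil {
+		log.Fatal(err)
+	}
+	if err := zw.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// Decompress with the matching reader.
+	zr, err := gzip.NewReader(&buf)
+	if err != nil {
+		log.Fatal(err)
+	}
+	plain, err := ioutil.ReadAll(zr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("round-tripped %d bytes", len(plain))
+}
+```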
+
+Currently there is only minor speedup on decompression (mostly CRC32 calculation).
+
+# Performance Update 2018
+
+It has been a while since I last compared the speed of this package to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop, an Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how much smaller the compressed output is compared to stdlib, in percent. Negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide that smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" gives the best compression, and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
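+
+As a small sketch of how a caller picks a level (the helper name is mine, not part of the package), the level is simply the second argument to `flate.NewWriter`:
+
+```go
+package example
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// compressAtLevel deflates src into dst at the given level:
+// 1 is BestSpeed, 5 is the rebalanced default, 9 is BestCompression.
+func compressAtLevel(dst io.Writer, src io.Reader, level int) error {
+	fw, err := flate.NewWriter(dst, level) // returns an error for levels outside [-2, 9]
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(fw, src); err != nil {
+		fw.Close()
+		return err
+	}
+	return fw.Close()
+}
+```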
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. It is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS and CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible data. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (a 2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+# linear time compression (huffman only)
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that frequently used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
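+
+A minimal sketch of enabling this mode (the wrapper is illustrative only); the package exports the level as `flate.HuffmanOnly`, with `ConstantCompression` kept as a compatibility alias:
+
+```go
+package example
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// newHuffmanOnlyWriter returns a DEFLATE writer that skips all match
+// searching and only performs Huffman entropy encoding. The output is
+// still RFC 1951 compliant, so any DEFLATE decompressor can read it.
+func newHuffmanOnlyWriter(w io.Writer) (*flate.Writer, error) {
+	return flate.NewWriter(w, flate.HuffmanOnly)
+}
+```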
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip.
+
+
+# snappy package
+
+The standard snappy package has now been improved. This repo contains a copy of the snappy repo.
+
+I would advise using the standard package instead: https://github.com/golang/snappy
+
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go
new file mode 100644
index 000000000..a3200a8f4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/copy.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// forwardCopy is like the built-in copy function except that it always goes
+// forward from the start, even if the dst and src overlap.
+// It is equivalent to:
+// for i := 0; i < n; i++ {
+// mem[dst+i] = mem[src+i]
+// }
+func forwardCopy(mem []byte, dst, src, n int) {
+ if dst <= src {
+ copy(mem[dst:dst+n], mem[src:src+n])
+ return
+ }
+ for {
+ if dst >= src+n {
+ copy(mem[dst:dst+n], mem[src:src+n])
+ return
+ }
+ // There is some forward overlap. The destination
+ // will be filled with a repeated pattern of mem[src:src+k].
+ // We copy one instance of the pattern here, then repeat.
+ // Each time around this loop k will double.
+ k := dst - src
+ copy(mem[dst:dst+k], mem[src:src+k])
+ n -= k
+ dst += k
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
new file mode 100644
index 000000000..8298d309a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
@@ -0,0 +1,42 @@
+//+build !noasm
+//+build !appengine
+//+build !gccgo
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package flate
+
+import (
+ "github.com/klauspost/cpuid"
+)
+
+// crc32sse returns a hash for the first 4 bytes of the slice
+// len(a) must be >= 4.
+//go:noescape
+func crc32sse(a []byte) uint32
+
+// crc32sseAll calculates hashes for each 4-byte set in a.
+// dst must be at least len(a) - 4 in size.
+// The size is not checked by the assembly.
+//go:noescape
+func crc32sseAll(a []byte, dst []uint32)
+
+// matchLenSSE4 returns the number of matching bytes in a and b
+// up to length 'max'. Both slices must be at least 'max'
+// bytes in size.
+//
+// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
+//
+//go:noescape
+func matchLenSSE4(a, b []byte, max int) int
+
+// histogram accumulates a histogram of b in h.
+// h must be at least 256 entries in length,
+// and must be cleared before calling this function.
+//go:noescape
+func histogram(b []byte, h []int32)
+
+// Detect SSE 4.2 feature.
+func init() {
+ useSSE42 = cpuid.CPU.SSE42()
+}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
new file mode 100644
index 000000000..a79943727
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
@@ -0,0 +1,214 @@
+//+build !noasm
+//+build !appengine
+//+build !gccgo
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+// func crc32sse(a []byte) uint32
+TEXT ·crc32sse(SB), 4, $0
+ MOVQ a+0(FP), R10
+ XORQ BX, BX
+
+ // CRC32 dword (R10), EBX
+ BYTE $0xF2; BYTE $0x41; BYTE $0x0f
+ BYTE $0x38; BYTE $0xf1; BYTE $0x1a
+
+ MOVL BX, ret+24(FP)
+ RET
+
+// func crc32sseAll(a []byte, dst []uint32)
+TEXT ·crc32sseAll(SB), 4, $0
+ MOVQ a+0(FP), R8 // R8: src
+ MOVQ a_len+8(FP), R10 // input length
+ MOVQ dst+24(FP), R9 // R9: dst
+ SUBQ $4, R10
+ JS end
+ JZ one_crc
+ MOVQ R10, R13
+ SHRQ $2, R10 // len/4
+ ANDQ $3, R13 // len&3
+ XORQ BX, BX
+ ADDQ $1, R13
+ TESTQ R10, R10
+ JZ rem_loop
+
+crc_loop:
+ MOVQ (R8), R11
+ XORQ BX, BX
+ XORQ DX, DX
+ XORQ DI, DI
+ MOVQ R11, R12
+ SHRQ $8, R11
+ MOVQ R12, AX
+ MOVQ R11, CX
+ SHRQ $16, R12
+ SHRQ $16, R11
+ MOVQ R12, SI
+
+ // CRC32 EAX, EBX
+ BYTE $0xF2; BYTE $0x0f
+ BYTE $0x38; BYTE $0xf1; BYTE $0xd8
+
+ // CRC32 ECX, EDX
+ BYTE $0xF2; BYTE $0x0f
+ BYTE $0x38; BYTE $0xf1; BYTE $0xd1
+
+ // CRC32 ESI, EDI
+ BYTE $0xF2; BYTE $0x0f
+ BYTE $0x38; BYTE $0xf1; BYTE $0xfe
+ MOVL BX, (R9)
+ MOVL DX, 4(R9)
+ MOVL DI, 8(R9)
+
+ XORQ BX, BX
+ MOVL R11, AX
+
+ // CRC32 EAX, EBX
+ BYTE $0xF2; BYTE $0x0f
+ BYTE $0x38; BYTE $0xf1; BYTE $0xd8
+ MOVL BX, 12(R9)
+
+ ADDQ $16, R9
+ ADDQ $4, R8
+ XORQ BX, BX
+ SUBQ $1, R10
+ JNZ crc_loop
+
+rem_loop:
+ MOVL (R8), AX
+
+ // CRC32 EAX, EBX
+ BYTE $0xF2; BYTE $0x0f
+ BYTE $0x38; BYTE $0xf1; BYTE $0xd8
+
+ MOVL BX, (R9)
+ ADDQ $4, R9
+ ADDQ $1, R8
+ XORQ BX, BX
+ SUBQ $1, R13
+ JNZ rem_loop
+
+end:
+ RET
+
+one_crc:
+ MOVQ $1, R13
+ XORQ BX, BX
+ JMP rem_loop
+
+// func matchLenSSE4(a, b []byte, max int) int
+TEXT ·matchLenSSE4(SB), 4, $0
+ MOVQ a_base+0(FP), SI
+ MOVQ b_base+24(FP), DI
+ MOVQ DI, DX
+ MOVQ max+48(FP), CX
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of max, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ CX, $8
+ JLT cmp1
+ MOVQ (SI), AX
+ MOVQ (DI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, SI
+ ADDQ $8, DI
+ SUBQ $8, CX
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, DI
+
+ // Subtract off &b[0] to convert from &b[ret] to ret, and return.
+ SUBQ DX, DI
+ MOVQ DI, ret+56(FP)
+ RET
+
+cmp1:
+ // In the slices' tail, compare 1 byte at a time.
+ CMPQ CX, $0
+ JEQ matchLenEnd
+ MOVB (SI), AX
+ MOVB (DI), BX
+ CMPB AX, BX
+ JNE matchLenEnd
+ ADDQ $1, SI
+ ADDQ $1, DI
+ SUBQ $1, CX
+ JMP cmp1
+
+matchLenEnd:
+ // Subtract off &b[0] to convert from &b[ret] to ret, and return.
+ SUBQ DX, DI
+ MOVQ DI, ret+56(FP)
+ RET
+
+// func histogram(b []byte, h []int32)
+TEXT ·histogram(SB), 4, $0
+ MOVQ b+0(FP), SI // SI: &b
+ MOVQ b_len+8(FP), R9 // R9: len(b)
+ MOVQ h+24(FP), DI // DI: Histogram
+ MOVQ R9, R8
+ SHRQ $3, R8
+ JZ hist1
+ XORQ R11, R11
+
+loop_hist8:
+ MOVQ (SI), R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ MOVB R10, R11
+ INCL (DI)(R11*4)
+ SHRQ $8, R10
+
+ INCL (DI)(R10*4)
+
+ ADDQ $8, SI
+ DECQ R8
+ JNZ loop_hist8
+
+hist1:
+ ANDQ $7, R9
+ JZ end_hist
+ XORQ R10, R10
+
+loop_hist1:
+ MOVB (SI), R10
+ INCL (DI)(R10*4)
+ INCQ SI
+ DECQ R9
+ JNZ loop_hist1
+
+end_hist:
+ RET
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go
new file mode 100644
index 000000000..dcf43bd50
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go
@@ -0,0 +1,35 @@
+//+build !amd64 noasm appengine gccgo
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package flate
+
+func init() {
+ useSSE42 = false
+}
+
+// crc32sse should never be called.
+func crc32sse(a []byte) uint32 {
+ panic("no assembler")
+}
+
+// crc32sseAll should never be called.
+func crc32sseAll(a []byte, dst []uint32) {
+ panic("no assembler")
+}
+
+// matchLenSSE4 should never be called.
+func matchLenSSE4(a, b []byte, max int) int {
+ panic("no assembler")
+ return 0
+}
+
+// histogram accumulates a histogram of b in h.
+//
+// len(h) must be >= 256, and h's elements must be all zeroes.
+func histogram(b []byte, h []int32) {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 000000000..9e6e7ff0c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1353 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ NoCompression = 0
+ BestSpeed = 1
+ BestCompression = 9
+ DefaultCompression = -1
+
+ // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+ // entropy encoding. This mode is useful in compressing data that has
+ // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+ // that lacks an entropy encoder. Compression gains are achieved when
+ // certain bytes in the input stream occur more frequently than others.
+ //
+ // Note that HuffmanOnly produces a compressed output that is
+ // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+ // continue to be able to decompress this output.
+ HuffmanOnly = -2
+ ConstantCompression = HuffmanOnly // compatibility alias.
+
+ logWindowSize = 15
+ windowSize = 1 << logWindowSize
+ windowMask = windowSize - 1
+ logMaxOffsetSize = 15 // Standard DEFLATE
+ minMatchLength = 4 // The smallest match that the compressor looks for
+ maxMatchLength = 258 // The longest match for the compressor
+ minOffsetSize = 1 // The shortest offset that makes any sense
+
+	// The maximum number of tokens we put into a single flate block, just to
+	// stop things from getting too large.
+ maxFlateBlockTokens = 1 << 14
+ maxStoreBlockSize = 65535
+ hashBits = 17 // After 17 performance degrades
+ hashSize = 1 << hashBits
+ hashMask = (1 << hashBits) - 1
+ hashShift = (hashBits + minMatchLength - 1) / minMatchLength
+ maxHashOffset = 1 << 24
+
+ skipNever = math.MaxInt32
+)
+
+var useSSE42 bool
+
+type compressionLevel struct {
+ good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+ {}, // 0
+ // Level 1-4 uses specialized algorithm - values not used
+ {0, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 2},
+ {0, 0, 0, 0, 0, 3},
+ {0, 0, 0, 0, 0, 4},
+	// For levels 5-6 we don't bother trying with lazy matches.
+	// Lazy matching is at least 30% slower, for only about a 1.5% compression gain.
+ {6, 0, 12, 8, 12, 5},
+ {8, 0, 24, 16, 16, 6},
+ // Levels 7-9 use increasingly more lazy matching
+ // and increasingly stringent conditions for "good enough".
+ {8, 8, 24, 16, skipNever, 7},
+ {10, 16, 24, 64, skipNever, 8},
+ {32, 258, 258, 4096, skipNever, 9},
+}
+
+type compressor struct {
+ compressionLevel
+
+ w *huffmanBitWriter
+ bulkHasher func([]byte, []uint32)
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+ sync bool // requesting flush
+
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ chainHead int
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
+ hashOffset int
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ window []byte
+ windowEnd int
+ blockStart int // window index where current tokens start
+ byteAvailable bool // if true, still need to process window[index-1].
+
+ // queued output tokens
+ tokens tokens
+
+ // deflate state
+ length int
+ offset int
+ hash uint32
+ maxInsertIndex int
+ err error
+ ii uint16 // position of last match, intended to overflow to reset.
+
+ snap snappyEnc
+ hashMatch [maxMatchLength + minMatchLength]uint32
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+ if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ // shift the window by windowSize
+ copy(d.window[:], d.window[windowSize:2*windowSize])
+ d.index -= windowSize
+ d.windowEnd -= windowSize
+ if d.blockStart >= windowSize {
+ d.blockStart -= windowSize
+ } else {
+ d.blockStart = math.MaxInt32
+ }
+ d.hashOffset += windowSize
+ if d.hashOffset > maxHashOffset {
+ delta := d.hashOffset - 1
+ d.hashOffset -= delta
+ d.chainHead -= delta
+ // Iterate over slices instead of arrays to avoid copying
+ // the entire table onto the stack (Issue #18625).
+ for i, v := range d.hashPrev[:] {
+ if int(v) > delta {
+ d.hashPrev[i] = uint32(int(v) - delta)
+ } else {
+ d.hashPrev[i] = 0
+ }
+ }
+ for i, v := range d.hashHead[:] {
+ if int(v) > delta {
+ d.hashHead[i] = uint32(int(v) - delta)
+ } else {
+ d.hashHead[i] = 0
+ }
+ }
+ }
+ }
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ d.w.writeBlock(tok.tokens[:tok.n], eof, window)
+ return d.w.err
+ }
+ return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine whether the block should only be Huffman encoded, or
+// written as a dynamic Huffman block.
+func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ if d.blockStart <= index {
+ window := d.window[d.blockStart:index]
+ // If we removed less than a 64th of all literals
+ // we huffman compress the block.
+ if int(tok.n) > len(window)-int(tok.n>>6) {
+ d.w.writeBlockHuff(eof, window)
+ } else {
+ // Write a dynamic huffman block.
+ d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window)
+ }
+ } else {
+ d.w.writeBlock(tok.tokens[:tok.n], eof, nil)
+ }
+ d.blockStart = index
+ return d.w.err
+ }
+ return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+	// Do not fill window if we are in store-only mode,
+	// or using constant or Snappy compression.
+ switch d.compressionLevel.level {
+ case 0, 1, 2:
+ return
+ }
+ // If we are given too much, cut it.
+ if len(b) > windowSize {
+ b = b[len(b)-windowSize:]
+ }
+ // Add all to window.
+ n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at a time (more L1 cache hits)
+ loops := (n + 256 - minMatchLength) / 256
+ for j := 0; j < loops; j++ {
+ startindex := j * 256
+ end := startindex + 256 + minMatchLength - 1
+ if end > n {
+ end = n
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+
+ if dstSize <= 0 {
+ continue
+ }
+
+ dst := d.hashMatch[:dstSize]
+ d.bulkHasher(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ d.hashPrev[di&windowMask] = d.hashHead[newH]
+ // Set the head of the hash chain to us.
+ d.hashHead[newH] = uint32(di + d.hashOffset)
+ }
+ d.hash = newH
+ }
+ // Update window information.
+ d.windowEnd += n
+ d.index = n
+}
+
+// findMatch tries to find a match starting at pos whose length is greater
+// than prevLength. We only look at d.chain possibilities before giving up.
+// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = prevLength
+ if length >= d.good {
+ tries >>= 2
+ }
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:], wPos, minMatchLook)
+
+ if n > length && (n > minMatchLength || pos-i <= 4096) {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i == minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+ if i < minIndex || i < 0 {
+ break
+ }
+ }
+ return
+}
+
+// findMatchSSE tries to find a match starting at pos whose length is greater
+// than prevLength. We only look at d.chain possibilities before giving up.
+// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = prevLength
+ if length >= d.good {
+ tries >>= 2
+ }
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLenSSE4(win[i:], wPos, minMatchLook)
+
+ if n > length && (n > minMatchLength || pos-i <= 4096) {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i == minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+ if i < minIndex || i < 0 {
+ break
+ }
+ }
+ return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+const hashmul = 0x1e35a7bd
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < minMatchLength {
+ return
+ }
+ hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ dst[0] = (hb * hashmul) >> (32 - hashBits)
+ end := len(b) - minMatchLength + 1
+ for i := 1; i < end; i++ {
+ hb = (hb << 8) | uint32(b[i+3])
+ dst[i] = (hb * hashmul) >> (32 - hashBits)
+ }
+}
+
+// matchLen returns the number of matching bytes in a and b
+// up to length 'max'. Both slices must be at least 'max'
+// bytes in size.
+func matchLen(a, b []byte, max int) int {
+ a = a[:max]
+ b = b[:len(a)]
+ for i, av := range a {
+ if b[i] != av {
+ return i
+ }
+ }
+ return max
+}
+
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.hashOffset = 1
+ d.length = minMatchLength - 1
+ d.offset = 0
+ d.byteAvailable = false
+ d.index = 0
+ d.hash = 0
+ d.chainHead = -1
+ d.bulkHasher = bulkHash4
+ if useSSE42 {
+ d.bulkHasher = crc32sseAll
+ }
+}
+
+// Assumes that d.fastSkipHashing != skipNever,
+// otherwise use deflateLazy
+func (d *compressor) deflate() {
+
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if d.index < d.maxInsertIndex {
+ d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ }
+
+ for {
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - d.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ return
+ }
+ }
+ if d.index < d.maxInsertIndex {
+ // Update the hash
+ d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ ch := d.hashHead[d.hash&hashMask]
+ d.chainHead = int(ch)
+ d.hashPrev[d.index&windowMask] = ch
+ d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset)
+ }
+ d.length = minMatchLength - 1
+ d.offset = 0
+ minIndex := d.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
+ if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+ d.length = newLength
+ d.offset = newOffset
+ }
+ }
+ if d.length >= minMatchLength {
+ d.ii = 0
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assumes 3
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
+ d.tokens.n++
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ if d.length <= d.fastSkipHashing {
+ var newIndex int
+ newIndex = d.index + d.length
+ // Calculate missing hashes
+ end := newIndex
+ if end > d.maxInsertIndex {
+ end = d.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := d.index + 1
+ if startindex > d.maxInsertIndex {
+ startindex = d.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := d.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ d.hashPrev[di&windowMask] = d.hashHead[newH]
+ // Set the head of the hash chain to us.
+ d.hashHead[newH] = uint32(di + d.hashOffset)
+ }
+ d.hash = newH
+ }
+ d.index = newIndex
+ } else {
+ // For matches this long, we don't bother inserting each individual
+ // item into the table.
+ d.index += d.length
+ if d.index < d.maxInsertIndex {
+ d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ }
+ }
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ } else {
+ d.ii++
+ end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
+ if end > d.windowEnd {
+ end = d.windowEnd
+ }
+ for i := d.index; i < end; i++ {
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ }
+ d.index = end
+ }
+ }
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if d.index < d.maxInsertIndex {
+ d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ }
+
+ for {
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - d.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ d.byteAvailable = false
+ }
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ return
+ }
+ }
+ if d.index < d.maxInsertIndex {
+ // Update the hash
+ d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+ ch := d.hashHead[d.hash&hashMask]
+ d.chainHead = int(ch)
+ d.hashPrev[d.index&windowMask] = ch
+ d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset)
+ }
+ prevLength := d.length
+ prevOffset := d.offset
+ d.length = minMatchLength - 1
+ d.offset = 0
+ minIndex := d.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+ d.length = newLength
+ d.offset = newOffset
+ }
+ }
+ if prevLength >= minMatchLength && d.length <= prevLength {
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+ d.tokens.n++
+
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ var newIndex int
+ newIndex = d.index + prevLength - 1
+ // Calculate missing hashes
+ end := newIndex
+ if end > d.maxInsertIndex {
+ end = d.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := d.index + 1
+ if startindex > d.maxInsertIndex {
+ startindex = d.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := d.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ d.hashPrev[di&windowMask] = d.hashHead[newH]
+ // Set the head of the hash chain to us.
+ d.hashHead[newH] = uint32(di + d.hashOffset)
+ }
+ d.hash = newH
+ }
+
+ d.index = newIndex
+ d.byteAvailable = false
+ d.length = minMatchLength - 1
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ } else {
+ // Reset, if we got a match this run.
+ if d.length >= minMatchLength {
+ d.ii = 0
+ }
+ // We have a byte waiting. Emit it.
+ if d.byteAvailable {
+ d.ii++
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ d.index++
+
+ // If we have a long run of no matches, skip additional bytes
+ // Resets when d.ii overflows after 64KB.
+ if d.ii > 31 {
+ n := int(d.ii >> 5)
+ for j := 0; j < n; j++ {
+ if d.index >= d.windowEnd-1 {
+ break
+ }
+
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ d.index++
+ }
+ // Flush last byte
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ d.byteAvailable = false
+ // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ }
+ } else {
+ d.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+// Assumes that d.fastSkipHashing != skipNever,
+// otherwise use deflateLazySSE
+func (d *compressor) deflateSSE() {
+
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if d.index < d.maxInsertIndex {
+ d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ }
+
+ for {
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - d.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ return
+ }
+ }
+ if d.index < d.maxInsertIndex {
+ // Update the hash
+ d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ ch := d.hashHead[d.hash]
+ d.chainHead = int(ch)
+ d.hashPrev[d.index&windowMask] = ch
+ d.hashHead[d.hash] = uint32(d.index + d.hashOffset)
+ }
+ d.length = minMatchLength - 1
+ d.offset = 0
+ minIndex := d.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
+ if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+ d.length = newLength
+ d.offset = newOffset
+ }
+ }
+ if d.length >= minMatchLength {
+ d.ii = 0
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assumes 3
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
+ d.tokens.n++
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ if d.length <= d.fastSkipHashing {
+ var newIndex int
+ newIndex = d.index + d.length
+ // Calculate missing hashes
+ end := newIndex
+ if end > d.maxInsertIndex {
+ end = d.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := d.index + 1
+ if startindex > d.maxInsertIndex {
+ startindex = d.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := d.hashMatch[:dstSize]
+
+ crc32sseAll(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ d.hashPrev[di&windowMask] = d.hashHead[newH]
+ // Set the head of the hash chain to us.
+ d.hashHead[newH] = uint32(di + d.hashOffset)
+ }
+ d.hash = newH
+ }
+ d.index = newIndex
+ } else {
+ // For matches this long, we don't bother inserting each individual
+ // item into the table.
+ d.index += d.length
+ if d.index < d.maxInsertIndex {
+ d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ }
+ }
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ } else {
+ d.ii++
+ end := d.index + int(d.ii>>5) + 1
+ if end > d.windowEnd {
+ end = d.windowEnd
+ }
+ for i := d.index; i < end; i++ {
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ }
+ d.index = end
+ }
+ }
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazySSE() {
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+
+ d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+ if d.index < d.maxInsertIndex {
+ d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ }
+
+ for {
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - d.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && d.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ d.byteAvailable = false
+ }
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ return
+ }
+ }
+ if d.index < d.maxInsertIndex {
+ // Update the hash
+ d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+ ch := d.hashHead[d.hash]
+ d.chainHead = int(ch)
+ d.hashPrev[d.index&windowMask] = ch
+ d.hashHead[d.hash] = uint32(d.index + d.hashOffset)
+ }
+ prevLength := d.length
+ prevOffset := d.offset
+ d.length = minMatchLength - 1
+ d.offset = 0
+ minIndex := d.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+ d.length = newLength
+ d.offset = newOffset
+ }
+ }
+ if prevLength >= minMatchLength && d.length <= prevLength {
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+ d.tokens.n++
+
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ var newIndex int
+ newIndex = d.index + prevLength - 1
+ // Calculate missing hashes
+ end := newIndex
+ if end > d.maxInsertIndex {
+ end = d.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := d.index + 1
+ if startindex > d.maxInsertIndex {
+ startindex = d.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := d.hashMatch[:dstSize]
+ crc32sseAll(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ d.hashPrev[di&windowMask] = d.hashHead[newH]
+ // Set the head of the hash chain to us.
+ d.hashHead[newH] = uint32(di + d.hashOffset)
+ }
+ d.hash = newH
+ }
+
+ d.index = newIndex
+ d.byteAvailable = false
+ d.length = minMatchLength - 1
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ } else {
+ // Reset, if we got a match this run.
+ if d.length >= minMatchLength {
+ d.ii = 0
+ }
+ // We have a byte waiting. Emit it.
+ if d.byteAvailable {
+ d.ii++
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ d.index++
+
+ // If we have a long run of no matches, skip additional bytes
+ // Resets when d.ii overflows after 64KB.
+ if d.ii > 31 {
+ n := int(d.ii >> 6)
+ for j := 0; j < n; j++ {
+ if d.index >= d.windowEnd-1 {
+ break
+ }
+
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ d.index++
+ }
+ // Flush last byte
+ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+ d.tokens.n++
+ d.byteAvailable = false
+ // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+ return
+ }
+ d.tokens.n = 0
+ }
+ }
+ } else {
+ d.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeSnappy will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeSnappy() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < maxStoreBlockSize {
+ if !d.sync {
+ return
+ }
+ // Handle extremely small sizes.
+ if d.windowEnd < 128 {
+ if d.windowEnd == 0 {
+ return
+ }
+ if d.windowEnd <= 32 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.tokens.n = 0
+ d.windowEnd = 0
+ } else {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ }
+ d.tokens.n = 0
+ d.windowEnd = 0
+ d.snap.Reset()
+ return
+ }
+ }
+
+ d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
+ // If we made zero matches, store the block as is.
+ if int(d.tokens.n) == d.windowEnd {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ // If we removed less than 1/16th, huffman compress the block.
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ } else {
+ d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
+ d.err = d.w.err
+ }
+ d.tokens.n = 0
+ d.windowEnd = 0
+}
+
+// write will add input bytes to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ d.step(d)
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level >= 1 && level <= 4:
+ d.snap = newSnappy(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeSnappy
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case 5 <= level && level <= 9:
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ if d.fastSkipHashing == skipNever {
+ if useSSE42 {
+ d.step = (*compressor).deflateLazySSE
+ } else {
+ d.step = (*compressor).deflateLazy
+ }
+ } else {
+ if useSSE42 {
+ d.step = (*compressor).deflateSSE
+ } else {
+ d.step = (*compressor).deflate
+ }
+ }
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for Snappy.
+ if d.snap != nil {
+ d.snap.Reset()
+ d.windowEnd = 0
+ d.tokens.n = 0
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+		// level was NoCompression or ConstantCompression.
+ d.windowEnd = 0
+ default:
+ d.chainHead = -1
+ for i := range d.hashHead {
+ d.hashHead[i] = 0
+ }
+ for i := range d.hashPrev {
+ d.hashPrev[i] = 0
+ }
+ d.hashOffset = 1
+ d.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens.n = 0
+ d.length = minMatchLength - 1
+ d.offset = 0
+ d.hash = 0
+ d.ii = 0
+ d.maxInsertIndex = 0
+ }
+}
+
+func (d *compressor) close() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ dw := &dictWriter{w}
+ zw, err := NewWriter(dw, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
+
+type dictWriter struct {
+ w io.Writer
+}
+
+func (w *dictWriter) Write(b []byte) (n int, err error) {
+ return w.w.Write(b)
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // http://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if dw, ok := w.d.w.writer.(*dictWriter); ok {
+ // w was created with NewWriterDict
+ dw.w = dst
+ w.d.reset(dw)
+ w.d.fillWindow(w.dict)
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+ w.dict = dict
+ w.d.reset(dst)
+ w.d.fillWindow(w.dict)
+}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 000000000..71c75a065
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// * Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// * Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checks about the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+ // the source prior to the copy are placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+ // Thus, a forwards copy is performed here; that is, the bytes copied are
+ // possibly dependent on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+loop:
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ if dstPos < endPos {
+ goto loop // Avoid for-loop so that this function can be inlined
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
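+
+// A sketch of the intended produce/consume cycle (illustrative): the
+// decompressor writes via writeByte/writeSlice/writeCopy until
+// availWrite() hits zero, calls readFlush, hands the returned slice to
+// the caller, and only then resumes writing; once wrPos reaches the end
+// of hist, readFlush wraps both positions to zero and marks the window
+// full.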
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 000000000..f9b2a699a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,701 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "io"
+)
+
+const (
+ // The largest offset code.
+ offsetCodeCount = 30
+
+ // The special code used to mark the end of a block.
+ endBlockMarker = 256
+
+ // The first length code.
+ lengthCodesStart = 257
+
+ // The number of codegen codes.
+ codegenCodeCount = 19
+ badCode = 255
+
+ // bufferFlushSize indicates the buffer size
+ // after which bytes are flushed to the writer.
+ // Should preferably be a multiple of 6, since
+ // we accumulate 6 bytes between writes to the buffer.
+ bufferFlushSize = 240
+
+ // bufferSize is the actual output byte buffer size.
+ // It must have additional headroom for a flush
+ // which can contain up to 8 bytes.
+ bufferSize = bufferFlushSize + 8
+)
+
+// The number of extra bits needed by length code X - lengthCodesStart.
+var lengthExtraBits = []int8{
+ /* 257 */ 0, 0, 0,
+ /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ /* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - lengthCodesStart.
+var lengthBase = []uint32{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+ 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+ 64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// offset code word extra bits.
+var offsetExtraBits = []int8{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ /* extended window */
+ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+}
+
+var offsetBase = []uint32{
+ /* normal deflate */
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+ /* extended window */
+ 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
+ 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
+ 0x100000, 0x180000, 0x200000, 0x300000,
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+ // writer is the underlying writer.
+ // Do not use it directly; use the write method, which ensures
+ // that Write errors are sticky.
+ writer io.Writer
+
+ // Data waiting to be written is bytes[0:nbytes]
+ // and then the low nbits of bits.
+ bits uint64
+ nbits uint
+ bytes [bufferSize]byte
+ codegenFreq [codegenCodeCount]int32
+ nbytes int
+ literalFreq []int32
+ offsetFreq []int32
+ codegen []uint8
+ literalEncoding *huffmanEncoder
+ offsetEncoding *huffmanEncoder
+ codegenEncoding *huffmanEncoder
+ err error
+}
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+ return &huffmanBitWriter{
+ writer: w,
+ literalFreq: make([]int32, maxNumLit),
+ offsetFreq: make([]int32, offsetCodeCount),
+ codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
+ literalEncoding: newHuffmanEncoder(maxNumLit),
+ codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+ offsetEncoding: newHuffmanEncoder(offsetCodeCount),
+ }
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+ w.writer = writer
+ w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+ w.bytes = [bufferSize]byte{}
+}
+
+func (w *huffmanBitWriter) flush() {
+ if w.err != nil {
+ w.nbits = 0
+ return
+ }
+ n := w.nbytes
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ n++
+ }
+ w.bits = 0
+ w.write(w.bytes[:n])
+ w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
+ if w.err != nil {
+ return
+ }
+ w.bits |= uint64(b) << w.nbits
+ w.nbits += nb
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ bytes := w.bytes[n : n+6]
+ bytes[0] = byte(bits)
+ bytes[1] = byte(bits >> 8)
+ bytes[2] = byte(bits >> 16)
+ bytes[3] = byte(bits >> 24)
+ bytes[4] = byte(bits >> 32)
+ bytes[5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+ }
+}
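+
+// Note on the accumulator scheme above: bits are queued least significant
+// first in the 64-bit w.bits. Once at least 48 bits are pending, exactly 48
+// of them are spilled as six whole bytes, so at most 47 bits remain between
+// calls and a further write of up to 16 bits can never overflow uint64.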
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker.
+//
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litEnc, offEnc The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+ // Note that we are using codegen both as a temporary variable for holding
+ // a copy of the frequencies, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = uint8(litEnc.codes[i].len)
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = uint8(offEnc.codes[i].len)
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
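+
+// A worked example of the encoding above (illustrative): a run of seven
+// code lengths of 4 becomes {4, 16, 3}: one literal 4, then code 16
+// ("repeat previous 3-6 times") carrying the extra value 6-3 = 3. A run
+// of thirty zero lengths becomes {18, 19}, since code 18 repeats zero
+// 11-138 times and stores 30-11 = 19 in its extra bits.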
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7
+ size = header +
+ litEnc.bitLength(w.literalFreq) +
+ offEnc.bitLength(w.offsetFreq) +
+ extraBits
+
+ return size, numCodegens
+}
+
+// fixedSize returns the size of fixed Huffman encoded data in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the data
+// fits inside a single stored block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
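+
+// The 5 bytes of overhead per stored block are the one-byte block header
+// (three header bits rounded up to a byte boundary) plus the two-byte LEN
+// and two-byte NLEN fields of RFC 1951 section 3.2.4.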
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ if w.err != nil {
+ return
+ }
+ w.bits |= uint64(c.code) << w.nbits
+ w.nbits += uint(c.len)
+ if w.nbits >= 48 {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+ bytes := w.bytes[n : n+6]
+ bytes[0] = byte(bits)
+ bytes[1] = byte(bits >> 8)
+ bytes[2] = byte(bits >> 16)
+ bytes[3] = byte(bits >> 24)
+ bytes[4] = byte(bits >> 32)
+ bytes[5] = byte(bits >> 40)
+ n += 6
+ if n >= bufferFlushSize {
+ w.write(w.bytes[:n])
+ n = 0
+ }
+ w.nbytes = n
+ }
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+ var codeWord int = int(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
+
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
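+
+// For reference (RFC 1951 section 3.2.3): each block begins with three
+// header bits, BFINAL in bit 0 and BTYPE in bits 1-2, so the 3-bit values
+// written by the three header methods decode as 0/1 = stored, 2/3 = fixed
+// Huffman and 4/5 = dynamic Huffman, the odd value of each pair marking
+// the final block.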
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens = append(tokens, endBlockMarker)
+ numLiterals, numOffsets := w.indexTokens(tokens)
+
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ // We only bother calculating the costs of the extra bits required by
+ // the length of offset fields (which will be the same for both fixed
+ // and dynamic encoding), if we need to compare those two encodings
+ // against stored encoding.
+ for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
+ // First eight length codes have extra size = 0.
+ extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
+ }
+ for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
+ // First four offset codes have extra size = 0.
+ extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
+ }
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = w.fixedSize(extraBits)
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize < size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size, the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens = append(tokens, endBlockMarker)
+ numLiterals, numOffsets := w.indexTokens(tokens)
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+
+ // Write the tokens.
+ w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
+ for i := range w.literalFreq {
+ w.literalFreq[i] = 0
+ }
+ for i := range w.offsetFreq {
+ w.offsetFreq[i] = 0
+ }
+
+ for _, t := range tokens {
+ if t < matchType {
+ w.literalFreq[t.literal()]++
+ continue
+ }
+ length := t.length()
+ offset := t.offset()
+ w.literalFreq[lengthCodesStart+lengthCode(length)]++
+ w.offsetFreq[offsetCode(offset)]++
+ }
+
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+ // we should count at least one offset to be sure that the offset huffman tree could be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ w.literalEncoding.generate(w.literalFreq, 15)
+ w.offsetEncoding.generate(w.offsetFreq, 15)
+ return
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ for _, t := range tokens {
+ if t < matchType {
+ w.writeCode(leCodes[t.literal()])
+ continue
+ }
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length)
+ w.writeCode(leCodes[lengthCode+lengthCodesStart])
+ extraLengthBits := uint(lengthExtraBits[lengthCode])
+ if extraLengthBits > 0 {
+ extraLength := int32(length - lengthBase[lengthCode])
+ w.writeBits(extraLength, extraLengthBits)
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := offsetCode(offset)
+ w.writeCode(oeCodes[offsetCode])
+ extraOffsetBits := uint(offsetExtraBits[offsetCode])
+ if extraOffsetBits > 0 {
+ extraOffset := int32(offset - offsetBase[offsetCode])
+ w.writeBits(extraOffset, extraOffsetBits)
+ }
+ }
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ w := newHuffmanBitWriter(nil)
+ w.offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(w.offsetFreq, 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq {
+ w.literalFreq[i] = 0
+ }
+
+ // Add everything as literals
+ histogram(input, w.literalFreq)
+
+ w.literalFreq[endBlockMarker] = 1
+
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+
+ w.literalEncoding.generate(w.literalFreq, 15)
+
+ // Figure out smallest code.
+ // Always use dynamic Huffman or Store
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ encoding := w.literalEncoding.codes[:257]
+ n := w.nbytes
+ for _, t := range input {
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ w.bits |= uint64(c.code) << w.nbits
+ w.nbits += uint(c.len)
+ if w.nbits < 48 {
+ continue
+ }
+ // Store 6 bytes
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ bytes := w.bytes[n : n+6]
+ bytes[0] = byte(bits)
+ bytes[1] = byte(bits >> 8)
+ bytes[2] = byte(bits >> 16)
+ bytes[3] = byte(bits >> 24)
+ bytes[4] = byte(bits >> 32)
+ bytes[5] = byte(bits >> 40)
+ n += 6
+ if n < bufferFlushSize {
+ continue
+ }
+ w.write(w.bytes[:n])
+ if w.err != nil {
+ return // Return early in the event of write failures
+ }
+ n = 0
+ }
+ w.nbytes = n
+ w.writeCode(encoding[endBlockMarker])
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 000000000..bdcbd823b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,344 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "sort"
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode struct {
+ code, len uint16
+}
+
+type huffmanEncoder struct {
+ codes []hcode
+ freqcache []literalNode
+ bitCount [17]int32
+ lns byLiteral // stored to avoid repeated allocation in generate
+ lfs byFreq // stored to avoid repeated allocation in generate
+}
+
+type literalNode struct {
+ literal uint16
+ freq int32
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+ // Our level (kept mainly for better printing).
+ level int32
+
+ // The frequency of the last node at this level
+ lastFreq int32
+
+ // The frequency of the next character to add to this level
+ nextCharFreq int32
+
+ // The frequency of the next pair (from level below) to add to this level.
+ // Only valid if the "needed" value of the next lower level is 0.
+ nextPairFreq int32
+
+ // The number of chains remaining to generate for this level before moving
+ // up to the next level
+ needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint16) {
+ h.len = length
+ h.code = code
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+ return &huffmanEncoder{codes: make([]hcode, size)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(maxNumLit)
+ codes := h.codes
+ var ch uint16
+ for ch = 0; ch < maxNumLit; ch++ {
+ var bits uint16
+ var size uint16
+ switch {
+ case ch < 144:
+ // size 8, 00110000 .. 10111111
+ bits = ch + 48
+ size = 8
+ case ch < 256:
+ // size 9, 110010000 .. 111111111
+ bits = ch + 400 - 144
+ size = 9
+ case ch < 280:
+ // size 7, 0000000 .. 0010111
+ bits = ch - 256
+ size = 7
+ default:
+ // size 8, 11000000 .. 11000111
+ bits = ch + 192 - 280
+ size = 8
+ }
+ codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+ }
+ return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(30)
+ codes := h.codes
+ for ch := range codes {
+ codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+ }
+ return h
+}
+
+var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
+var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []int32) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ total += int(f) * int(h.codes[i].len)
+ }
+ }
+ return total
+}
+
+const maxBitsLimit = 16
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+// maxBits The maximum number of bits that should be used to encode any literal.
+// Must be less than 16.
+// return An integer array in which array[i] indicates the number of literals
+// that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+ // The tree can't have greater depth than n - 1, no matter what. This
+ // saves a little bit of work in some small cases
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: list[1].freq,
+ nextCharFreq: list[2].freq,
+ nextPairFreq: list[0].freq + list[1].freq,
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := maxBits
+ for {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+ // We've run out of both leafs and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+ // Lower leafCounts are the same as in the previous node.
+ leafCounts[level][level] = n
+ l.nextCharFreq = list[n].freq
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+ // more values in the level below
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+ // Something is wrong if, at the end, the top level is null or hasn't used
+ // all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+ code := uint16(0)
+ for n, bits := range bitCount {
+ code <<= 1
+ if n == 0 || bits == 0 {
+ continue
+ }
+ // The literals list[len(list)-bits] .. list[len(list)-1]
+ // are encoded using "bits" bits, and get the values
+ // code, code + 1, .... The code values are
+ // assigned in literal order (not frequency order).
+ chunk := list[len(list)-int(bits):]
+
+ h.lns.sort(chunk)
+ for _, node := range chunk {
+ h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ code++
+ }
+ list = list[0 : len(list)-int(bits)]
+ }
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
+ if h.freqcache == nil {
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
+ // The largest of these is maxNumLit, so we allocate for that case.
+ h.freqcache = make([]literalNode, maxNumLit+1)
+ }
+ list := h.freqcache[:len(freq)+1]
+ // Number of non-zero literals
+ count := 0
+ // Set list to be the set of all non-zero literals and their frequencies
+ for i, f := range freq {
+ if f != 0 {
+ list[count] = literalNode{uint16(i), f}
+ count++
+ } else {
+ list[count] = literalNode{}
+ h.codes[i].len = 0
+ }
+ }
+ list[len(freq)] = literalNode{}
+
+ list = list[:count]
+ if count <= 2 {
+ // Handle the small cases here, because they are awkward for the general case code. With
+ // two or fewer literals, everything has bit length 1.
+ for i, node := range list {
+ // "list" is in order of increasing literal value.
+ h.codes[node.literal].set(uint16(i), 1)
+ }
+ return
+ }
+ h.lfs.sort(list)
+
+ // Get the number of literals for each bit count
+ bitCount := h.bitCounts(list, maxBits)
+ // And do the assignment
+ h.assignEncodingAndSize(bitCount, list)
+}
+
+type byLiteral []literalNode
+
+func (s *byLiteral) sort(a []literalNode) {
+ *s = byLiteral(a)
+ sort.Sort(s)
+}
+
+func (s byLiteral) Len() int { return len(s) }
+
+func (s byLiteral) Less(i, j int) bool {
+ return s[i].literal < s[j].literal
+}
+
+func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type byFreq []literalNode
+
+func (s *byFreq) sort(a []literalNode) {
+ *s = byFreq(a)
+ sort.Sort(s)
+}
+
+func (s byFreq) Len() int { return len(s) }
+
+func (s byFreq) Less(i, j int) bool {
+ if s[i].freq == s[j].freq {
+ return s[i].literal < s[j].literal
+ }
+ return s[i].freq < s[j].freq
+}
+
+func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 000000000..800d0ce9e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,880 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+ "bufio"
+ "io"
+ "math/bits"
+ "strconv"
+ "sync"
+)
+
+const (
+ maxCodeLen = 16 // max length of Huffman code
+ maxCodeLenMask = 15 // mask for max length of Huffman code
+ // The next three numbers come from the RFC section 3.2.7, with the
+ // additional proviso in section 3.2.5 which implies that distance codes
+ // 30 and 31 should never occur in compressed data.
+ maxNumLit = 286
+ maxNumDist = 30
+ numCodes = 19 // number of codes in Huffman meta-code
+)
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError int64
+
+func (e CorruptInputError) Error() string {
+ return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
+}
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError struct {
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Read
+}
+
+func (e *ReadError) Error() string {
+ return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError struct {
+ Offset int64 // byte offset where error occurred
+ Err error // error returned by underlying Write
+}
+
+func (e *WriteError) Error() string {
+ return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
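+
+// A worked example of this packing (illustrative): a symbol decoding to
+// value 65 with a 5-bit code is stored as chunk = 65<<huffmanValueShift | 5,
+// so chunk&huffmanCountMask yields the code length (5) and
+// chunk>>huffmanValueShift yields the value (65).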
+
+type huffmanDecoder struct {
+ min int // the minimum code length
+ chunks *[huffmanNumChunks]uint32 // chunks as described above
+ links [][]uint32 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.chunks == nil {
+ h.chunks = &[huffmanNumChunks]uint32{}
+ }
+ if h.min != 0 {
+ *h = huffmanDecoder{chunks: h.chunks, links: h.links}
+ }
+
+ // Count number of codes of each length,
+ // compute min and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n&maxCodeLenMask]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i&maxCodeLenMask] = code
+ code += count[i&maxCodeLenMask]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ return false
+ }
+
+ h.min = min
+ chunks := h.chunks[:]
+ for i := range chunks {
+ chunks[i] = 0
+ }
+
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint32, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(bits.Reverse16(uint16(j)))
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ if cap(h.links[off]) < numLinks {
+ h.links[off] = make([]uint32, numLinks)
+ } else {
+ links := h.links[off][:0]
+ h.links[off] = links[:numLinks]
+ }
+ }
+ } else {
+ h.links = h.links[:0]
+ }
+
+ for i, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint32(i<<huffmanValueShift | n)
+ reverse := int(bits.Reverse16(uint16(code)))
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed-in io.Reader does not also have ReadByte,
+// NewReader will introduce its own buffering.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+ // Input source.
+ r Reader
+ roffset int64
+
+ // Input bits, in top of b.
+ b uint32
+ nb uint
+
+ // Huffman decoders for literal/length, distance.
+ h1, h2 huffmanDecoder
+
+ // Length arrays used to define Huffman codes.
+ bits *[maxNumLit + maxNumDist]int
+ codebits *[numCodes]int
+
+ // Output history, buffer.
+ dict dictDecoder
+
+ // Temporary buffer (avoids repeated allocation).
+ buf [4]byte
+
+ // Next step in the decompression,
+ // and decompression state.
+ step func(*decompressor)
+ stepState int
+ final bool
+ err error
+ toRead []byte
+ hl, hd *huffmanDecoder
+ copyLen int
+ copyDist int
+}
+
+func (f *decompressor) nextBlock() {
+ for f.nb < 1+2 {
+ if f.err = f.moreBits(); f.err != nil {
+ return
+ }
+ }
+ f.final = f.b&1 == 1
+ f.b >>= 1
+ typ := f.b & 3
+ f.b >>= 2
+ f.nb -= 1 + 2
+ switch typ {
+ case 0:
+ f.dataBlock()
+ case 1:
+ // compressed, fixed Huffman tables
+ f.hl = &fixedHuffmanDecoder
+ f.hd = nil
+ f.huffmanBlock()
+ case 2:
+ // compressed, dynamic Huffman tables
+ if f.err = f.readHuffman(); f.err != nil {
+ break
+ }
+ f.hl = &f.h1
+ f.hd = &f.h2
+ f.huffmanBlock()
+ default:
+ // 3 is reserved.
+ f.err = CorruptInputError(f.roffset)
+ }
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+ for {
+ if len(f.toRead) > 0 {
+ n := copy(b, f.toRead)
+ f.toRead = f.toRead[n:]
+ if len(f.toRead) == 0 {
+ return n, f.err
+ }
+ return n, nil
+ }
+ if f.err != nil {
+ return 0, f.err
+ }
+ f.step(f)
+ if f.err != nil && len(f.toRead) == 0 {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ }
+ }
+}
+
+// Support the io.WriteTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ flushed := false
+ for {
+ if len(f.toRead) > 0 {
+ n, err := w.Write(f.toRead)
+ total += int64(n)
+ if err != nil {
+ f.err = err
+ return total, err
+ }
+ if n != len(f.toRead) {
+ return total, io.ErrShortWrite
+ }
+ f.toRead = f.toRead[:0]
+ }
+ if f.err != nil && flushed {
+ if f.err == io.EOF {
+ return total, nil
+ }
+ return total, f.err
+ }
+ if f.err == nil {
+ f.step(f)
+ }
+ if len(f.toRead) == 0 && f.err != nil && !flushed {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ flushed = true
+ }
+ }
+}
+
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
+ return nil
+ }
+ return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+ // HLIT[5], HDIST[5], HCLEN[4].
+ for f.nb < 5+5+4 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ nlit := int(f.b&0x1F) + 257
+ if nlit > maxNumLit {
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ ndist := int(f.b&0x1F) + 1
+ if ndist > maxNumDist {
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ nclen := int(f.b&0xF) + 4
+ // numCodes is 19, so nclen is always valid.
+ f.b >>= 4
+ f.nb -= 5 + 5 + 4
+
+ // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+ for i := 0; i < nclen; i++ {
+ for f.nb < 3 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ f.codebits[codeOrder[i]] = int(f.b & 0x7)
+ f.b >>= 3
+ f.nb -= 3
+ }
+ for i := nclen; i < len(codeOrder); i++ {
+ f.codebits[codeOrder[i]] = 0
+ }
+ if !f.h1.init(f.codebits[0:]) {
+ return CorruptInputError(f.roffset)
+ }
+
+ // HLIT + 257 code lengths, HDIST + 1 code lengths,
+ // using the code length Huffman code.
+ for i, n := 0, nlit+ndist; i < n; {
+ x, err := f.huffSym(&f.h1)
+ if err != nil {
+ return err
+ }
+ if x < 16 {
+ // Actual length.
+ f.bits[i] = x
+ i++
+ continue
+ }
+ // Repeat previous length or zero.
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<<nb-1))
+ f.b >>= nb
+ f.nb -= nb
+ if i+rep > n {
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the min bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.min < f.bits[endBlockMarker] {
+ f.h1.min = f.bits[endBlockMarker]
+ }
+
+ return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBlock() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ v, err := f.huffSym(f.hl)
+ if err != nil {
+ f.err = err
+ return
+ }
+ var n uint // number of bits extra
+ var length int
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlock
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = f.moreBits(); err != nil {
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<<n-1))
+ f.b >>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = f.moreBits(); err != nil {
+ f.err = err
+ return
+ }
+ }
+ dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = f.moreBits(); err != nil {
+ f.err = err
+ return
+ }
+ }
+ extra |= int(f.b & uint32(1<<nb-1))
+ f.b >>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() {
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, dist
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlock // We need to continue this work
+ f.stepState = stateDict
+ return
+ }
+ goto readLiteral
+ }
+}
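+
+// A worked check of the closed-form length arithmetic above (illustrative):
+// for v = 270 the "v < 273" branch gives length = 270*4 - (269*4 - 19) = 23
+// with n = 2 extra bits, matching the RFC 1951 entry for length code 270
+// (lengths 23-26).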
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+ // Uncompressed.
+ // Discard current half-byte.
+ f.nb = 0
+ f.b = 0
+
+ // Length then ones-complement of length.
+ nr, err := io.ReadFull(f.r, f.buf[0:4])
+ f.roffset += int64(nr)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+ n := int(f.buf[0]) | int(f.buf[1])<<8
+ nn := int(f.buf[2]) | int(f.buf[3])<<8
+ if uint16(nn) != uint16(^n) {
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ if n == 0 {
+ f.toRead = f.dict.readFlush()
+ f.finishBlock()
+ return
+ }
+
+ f.copyLen = n
+ f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into the
+// dictionary window (f.dict). It pauses for reads when the window is full.
+func (f *decompressor) copyData() {
+ buf := f.dict.writeSlice()
+ if len(buf) > f.copyLen {
+ buf = buf[:f.copyLen]
+ }
+
+ cnt, err := io.ReadFull(f.r, buf)
+ f.roffset += int64(cnt)
+ f.copyLen -= cnt
+ f.dict.writeMark(cnt)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).copyData
+ return
+ }
+ f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+ if f.final {
+ if f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+ f.err = io.EOF
+ }
+ f.step = (*decompressor).nextBlock
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+ if e == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return e
+}
+
+func (f *decompressor) moreBits() error {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
+ return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with a single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it to
+ // satisfy the n == 0 check below.
+ n := uint(h.min)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ return 0, noEOF(err)
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & 31)
+ nb += 8
+ }
+ chunk := h.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ f.err = CorruptInputError(f.roffset)
+ return 0, f.err
+ }
+ f.b = b >> (n & 31)
+ f.nb = nb - n
+ return int(chunk >> huffmanValueShift), nil
+ }
+ }
+}
+
+func makeReader(r io.Reader) Reader {
+ if rr, ok := r.(Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+ fixedOnce.Do(func() {
+ // These come from the RFC section 3.2.6.
+ var bits [288]int
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ fixedHuffmanDecoder.init(bits[:])
+ })
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ h1: f.h1,
+ h2: f.h2,
+ dict: f.dict,
+ step: (*decompressor).nextBlock,
+ }
+ f.dict.init(maxMatchOffset, dict)
+ return nil
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, nil)
+ return &f
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReaderDict also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = (*decompressor).nextBlock
+ f.dict.init(maxMatchOffset, dict)
+ return &f
+}
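+
+// A minimal decompression sketch (illustrative; compressed is a
+// placeholder []byte of DEFLATE data supplied by the caller):
+//
+//	zr := NewReader(bytes.NewReader(compressed))
+//	defer zr.Close()
+//	if _, err := io.Copy(os.Stdout, zr); err != nil {
+//		// a CorruptInputError here indicates malformed input
+//	}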
diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go
new file mode 100644
index 000000000..c1a02720d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/reverse_bits.go
@@ -0,0 +1,48 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+var reverseByte = [256]byte{
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+}
+
+func reverseUint16(v uint16) uint16 {
+ return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
+}
+
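+// reverseBits reverses the low bitLength bits of number; for example,
+// reverseBits(0b1011, 4) == 0b1101.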
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return reverseUint16(number << uint8(16-bitLength))
+}
diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go
new file mode 100644
index 000000000..d853320a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/snappy.go
@@ -0,0 +1,900 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// emitLiteral writes a literal chunk to dst.
+func emitLiteral(dst *tokens, lit []byte) {
+ ol := int(dst.n)
+ for i, v := range lit {
+ dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ }
+ dst.n += uint16(len(lit))
+}
+
+// emitCopy writes a copy chunk for (offset, length) to dst.
+func emitCopy(dst *tokens, offset, length int) {
+ dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
+ dst.n++
+}
+
+type snappyEnc interface {
+ Encode(dst *tokens, src []byte)
+ Reset()
+}
+
+func newSnappy(level int) snappyEnc {
+ switch level {
+ case 1:
+ return &snappyL1{}
+ case 2:
+ return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
+ case 3:
+ return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
+ case 4:
+ return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
+ default:
+ panic("invalid level specified")
+ }
+}
+
+const (
+ tableBits = 14 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+ baseMatchOffset = 1 // The smallest match offset
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ maxMatchOffset = 1 << 15 // The largest match offset
+)
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
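+// hash returns the top tableBits bits of u*0x1e35a7bd, the
+// multiplicative hash constant also used by the snappy reference code.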
+func hash(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> tableShift
+}
+
+// snappyL1 encapsulates level 1 compression
+type snappyL1 struct{}
+
+func (e *snappyL1) Reset() {}
+
+func (e *snappyL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 16 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Initialize the hash table.
+ //
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
+ var table [tableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s))
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
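+		// For example: skip starts at 32, so skip>>5 == 1 and every byte
+		// is probed; once skip reaches 64 the step grows to 2 bytes, at 96
+		// to 3, and so on.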
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS))
+ if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ emitLiteral(dst, src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of Snappy's:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ s1 := base + maxMatchLength
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+ a := src[s:s1]
+ b := src[candidate+4:]
+ b = b[:len(a)]
+ l := len(a)
+ for i := range a {
+ if a[i] != b[i] {
+ l = i
+ break
+ }
+ }
+ s += l
+
+ // matchToken is flate's equivalent of Snappy's emitCopy.
+ dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
+ dst.n++
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x >> 0))
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x >> 8))
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x >> 16))
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+func load3232(b []byte, i int32) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6432(b []byte, i int32) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// snappyGen holds the state shared by the level 2-4 encoders:
+// the previous byte block and the running offset cur.
+// This is the generic implementation.
+type snappyGen struct {
+ prev []byte
+ cur int32
+}
+
+// snappyL2 encapsulates level 2 compression: the generic state plus
+// a hash table of single candidates, allowing matches across blocks.
+type snappyL2 struct {
+ snappyGen
+ table [tableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable
+// of matching across blocks, giving better compression at a small slowdown.
+func (e *snappyL2) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 8 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur > 1<<30 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxStoreBlockSize
+ }
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ e.cur += maxStoreBlockSize
+ e.prev = e.prev[:0]
+ return
+ }
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := int32(0)
+ s := int32(0)
+ cv := load3232(src, s)
+ nextHash := hash(cv)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := int32(32)
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = e.table[nextHash&tableMask]
+ now := load3232(src, nextS)
+ e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
+ nextHash = hash(now)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || cv != candidate.val {
+ // Out of range or not matched.
+ cv = now
+ continue
+ }
+ break
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ emitLiteral(dst, src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ s += 4
+ t := candidate.offset - e.cur + 4
+ l := e.matchlen(s, t, src)
+
+ // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
+ dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
+ dst.n++
+ s += l
+ nextEmit = s
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+4) < len(src) && t > 0 {
+ cv := load3232(src, t)
+ e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-1)
+ prevHash := hash(uint32(x))
+ e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidate = e.table[currHash&tableMask]
+ e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != candidate.val {
+ cv = uint32(x >> 8)
+ nextHash = hash(cv)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ emitLiteral(dst, src[nextEmit:])
+ }
+ e.cur += int32(len(src))
+ e.prev = e.prev[:len(src)]
+ copy(e.prev, src)
+}
+
+type tableEntryPrev struct {
+ Cur tableEntry
+ Prev tableEntry
+}
+
+// snappyL3 encapsulates level 3 compression: each table slot keeps
+// both the current and the previous candidate.
+type snappyL3 struct {
+ snappyGen
+ table [tableSize]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, but checks up to two candidates per position.
+func (e *snappyL3) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 8 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur > 1<<30 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
+ }
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ e.cur += maxStoreBlockSize
+ e.prev = e.prev[:0]
+ return
+ }
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := int32(0)
+ s := int32(0)
+ cv := load3232(src, s)
+ nextHash := hash(cv)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := int32(32)
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash&tableMask]
+ now := load3232(src, nextS)
+ e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
+ nextHash = hash(now)
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ break
+ }
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ break
+ }
+ }
+ }
+ cv = now
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ emitLiteral(dst, src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ s += 4
+ t := candidate.offset - e.cur + 4
+ l := e.matchlen(s, t, src)
+
+ // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
+ dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
+ dst.n++
+ s += l
+ nextEmit = s
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+4) < len(src) && t > 0 {
+ cv := load3232(src, t)
+ nextHash = hash(cv)
+ e.table[nextHash&tableMask] = tableEntryPrev{
+ Prev: e.table[nextHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + t, val: cv},
+ }
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-3 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-3)
+ prevHash := hash(uint32(x))
+ e.table[prevHash&tableMask] = tableEntryPrev{
+ Prev: e.table[prevHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash&tableMask] = tableEntryPrev{
+ Prev: e.table[prevHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash&tableMask] = tableEntryPrev{
+ Prev: e.table[prevHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
+ }
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidates := e.table[currHash&tableMask]
+ cv = uint32(x)
+ e.table[currHash&tableMask] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur, val: cv},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ }
+ }
+ cv = uint32(x >> 8)
+ nextHash = hash(cv)
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ emitLiteral(dst, src[nextEmit:])
+ }
+ e.cur += int32(len(src))
+ e.prev = e.prev[:len(src)]
+ copy(e.prev, src)
+}
+
+// snappyL4 encapsulates level 4 compression: level 3 plus a second
+// try of the alternative candidate when the first match is short.
+type snappyL4 struct {
+ snappyL3
+}
+
+// Encode uses a similar algorithm to level 3, but also tries the
+// alternative candidate when the first match isn't long enough.
+func (e *snappyL4) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 8 - 3
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ matchLenGood = 12
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur > 1<<30 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
+ }
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ e.cur += maxStoreBlockSize
+ e.prev = e.prev[:0]
+ return
+ }
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := int32(0)
+ s := int32(0)
+ cv := load3232(src, s)
+ nextHash := hash(cv)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := int32(32)
+
+ nextS := s
+ var candidate tableEntry
+ var candidateAlt tableEntry
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash&tableMask]
+ now := load3232(src, nextS)
+ e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
+ nextHash = hash(now)
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset {
+ offset = s - (candidates.Prev.offset - e.cur)
+ if cv == candidates.Prev.val && offset < maxMatchOffset {
+ candidateAlt = candidates.Prev
+ }
+ break
+ }
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset {
+ break
+ }
+ }
+ }
+ cv = now
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ emitLiteral(dst, src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ s += 4
+ t := candidate.offset - e.cur + 4
+ l := e.matchlen(s, t, src)
+ // Try alternative candidate if match length < matchLenGood.
+ if l < matchLenGood-4 && candidateAlt.offset != 0 {
+ t2 := candidateAlt.offset - e.cur + 4
+ l2 := e.matchlen(s, t2, src)
+ if l2 > l {
+ l = l2
+ t = t2
+ }
+ }
+ // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
+ dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
+ dst.n++
+ s += l
+ nextEmit = s
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+4) < len(src) && t > 0 {
+ cv := load3232(src, t)
+ nextHash = hash(cv)
+ e.table[nextHash&tableMask] = tableEntryPrev{
+ Prev: e.table[nextHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + t, val: cv},
+ }
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-3 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-3)
+ prevHash := hash(uint32(x))
+ e.table[prevHash&tableMask] = tableEntryPrev{
+ Prev: e.table[prevHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash&tableMask] = tableEntryPrev{
+ Prev: e.table[prevHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
+ }
+ x >>= 8
+ prevHash = hash(uint32(x))
+
+ e.table[prevHash&tableMask] = tableEntryPrev{
+ Prev: e.table[prevHash&tableMask].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
+ }
+ x >>= 8
+ currHash := hash(uint32(x))
+ candidates := e.table[currHash&tableMask]
+ cv = uint32(x)
+ e.table[currHash&tableMask] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur, val: cv},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ candidateAlt = tableEntry{}
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ offset = s - (candidates.Prev.offset - e.cur)
+ if cv == candidates.Prev.val && offset <= maxMatchOffset {
+ candidateAlt = candidates.Prev
+ }
+ continue
+ }
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if cv == candidate.val {
+ offset := s - (candidate.offset - e.cur)
+ if offset <= maxMatchOffset {
+ continue
+ }
+ }
+ }
+ cv = uint32(x >> 8)
+ nextHash = hash(cv)
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ emitLiteral(dst, src[nextEmit:])
+ }
+ e.cur += int32(len(src))
+ e.prev = e.prev[:len(src)]
+ copy(e.prev, src)
+}
+
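+// matchlen returns the number of bytes matching src[s:] at position t;
+// a negative t addresses the tail of the previous block (e.prev), and
+// the match may continue into the current block.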
+func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // If we are inside the current block
+ if t >= 0 {
+ b := src[t:]
+ a := src[s:s1]
+ b = b[:len(a)]
+ // Extend the match to be as long as possible.
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i)
+ }
+ }
+ return int32(len(a))
+ }
+
+ // We found a match in the previous block.
+ tp := int32(len(e.prev)) + t
+ if tp < 0 {
+ return 0
+ }
+
+ // Extend the match to be as long as possible.
+ a := src[s:s1]
+ b := e.prev[tp:]
+ if len(b) > len(a) {
+ b = b[:len(a)]
+ }
+ a = a[:len(b)]
+ for i := range b {
+ if a[i] != b[i] {
+ return int32(i)
+ }
+ }
+
+ // If we reached our limit, we matched everything we are
+ // allowed to in the previous block and we return.
+ n := int32(len(b))
+ if int(s+n) == s1 {
+ return n
+ }
+
+ // Continue looking for more matches in the current block.
+ a = src[s+n : s1]
+ b = src[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i) + n
+ }
+ }
+ return int32(len(a)) + n
+}
+
+// Reset the encoding table.
+func (e *snappyGen) Reset() {
+ e.prev = e.prev[:0]
+ e.cur += maxMatchOffset
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 000000000..4f275ea61
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,115 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import "fmt"
+
+const (
+	// 2 bits:   type    0 = literal  1 = EOF  2 = Match  3 = Unused
+	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
+	// 22 bits:  xoffset = offset - MIN_OFFSET_SIZE, or literal
+ lengthShift = 22
+ offsetMask = 1<<lengthShift - 1
+ typeMask = 3 << 30
+ literalType = 0 << 30
+ matchType = 1 << 30
+)
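+
+// For example, matchToken(0, 0), i.e. a match of length 3 at offset 1
+// after the MIN_MATCH_LENGTH/MIN_OFFSET_SIZE bias, packs to matchType
+// (0x40000000); length() and offset() below recover the biased fields.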
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [...]uint32{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+ 17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 28,
+}
+
+var offsetCodes = [...]uint32{
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+type token uint32
+
+type tokens struct {
+ tokens [maxStoreBlockSize + 1]token
+ n uint16 // Must be able to contain maxStoreBlockSize
+}
+
+// Convert a literal into a literal token.
+func literalToken(literal uint32) token { return token(literalType + literal) }
+
+// Convert a < xlength, xoffset > pair into a match token.
+func matchToken(xlength uint32, xoffset uint32) token {
+ return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+// matchTokend is a validating variant of matchToken that panics on
+// out-of-range input; useful for debugging.
+func matchTokend(xlength uint32, xoffset uint32) token {
+	if xlength > maxMatchLength || xoffset > maxMatchOffset {
+		panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
+	}
+	return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint32 { return uint32(t - literalType) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
+
+func lengthCode(len uint32) uint32 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
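+// For example, off == 256 falls into the second tier:
+// offsetCodes[256>>7] + 14 == offsetCodes[2] + 14 == 16.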
+func offsetCode(off uint32) uint32 {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[off>>7] + 14
+ } else {
+ return offsetCodes[off>>14] + 28
+ }
+}
diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE
new file mode 100644
index 000000000..5cec7ee94
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md
new file mode 100644
index 000000000..b2b6bee87
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/README.md
@@ -0,0 +1,145 @@
+# cpuid
+Package cpuid provides information about the CPU running the current program.
+
+CPU features are detected on startup, and kept for fast access through the life of the application.
+Currently only x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, so the library is easy to build and use.
+
+You can access the CPU information through the shared CPU variable of the cpuid library.
+
+Package home: https://github.com/klauspost/cpuid
+
+[![GoDoc][1]][2] [![Build Status][3]][4]
+
+[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
+[2]: https://godoc.org/github.com/klauspost/cpuid
+[3]: https://travis-ci.org/klauspost/cpuid.svg
+[4]: https://travis-ci.org/klauspost/cpuid
+
+# features
+## CPU Instructions
+* **CMOV** (i686 CMOV)
+* **NX** (NX (No-Execute) bit)
+* **AMD3DNOW** (AMD 3DNOW)
+* **AMD3DNOWEXT** (AMD 3DNowExt)
+* **MMX** (standard MMX)
+* **MMXEXT** (SSE integer functions or AMD MMX ext)
+* **SSE** (SSE functions)
+* **SSE2** (P4 SSE functions)
+* **SSE3** (Prescott SSE3 functions)
+* **SSSE3** (Conroe SSSE3 functions)
+* **SSE4** (Penryn SSE4.1 functions)
+* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
+* **SSE42** (Nehalem SSE4.2 functions)
+* **AVX** (AVX functions)
+* **AVX2** (AVX2 functions)
+* **FMA3** (Intel FMA 3)
+* **FMA4** (Bulldozer FMA4 functions)
+* **XOP** (Bulldozer XOP functions)
+* **F16C** (Half-precision floating-point conversion)
+* **BMI1** (Bit Manipulation Instruction Set 1)
+* **BMI2** (Bit Manipulation Instruction Set 2)
+* **TBM** (AMD Trailing Bit Manipulation)
+* **LZCNT** (LZCNT instruction)
+* **POPCNT** (POPCNT instruction)
+* **AESNI** (Advanced Encryption Standard New Instructions)
+* **CLMUL** (Carry-less Multiplication)
+* **HTT** (Hyperthreading (enabled))
+* **HLE** (Hardware Lock Elision)
+* **RTM** (Restricted Transactional Memory)
+* **RDRAND** (RDRAND instruction is available)
+* **RDSEED** (RDSEED instruction is available)
+* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
+* **SHA** (Intel SHA Extensions)
+* **AVX512F** (AVX-512 Foundation)
+* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
+* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
+* **AVX512PF** (AVX-512 Prefetch Instructions)
+* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
+* **AVX512CD** (AVX-512 Conflict Detection Instructions)
+* **AVX512BW** (AVX-512 Byte and Word Instructions)
+* **AVX512VL** (AVX-512 Vector Length Extensions)
+* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
+* **MPX** (Intel MPX (Memory Protection Extensions))
+* **ERMS** (Enhanced REP MOVSB/STOSB)
+* **RDTSCP** (RDTSCP Instruction)
+* **CX16** (CMPXCHG16B Instruction)
+* **SGX** (Software Guard Extensions, with activation details)
+
+## Performance
+* **RTCounter()** Returns the current cycle count using the RDTSCP instruction (0 if unsupported). Can be used for benchmarking; see the sketch after this list.
+* **SSE2SLOW** (SSE2 is supported, but usually not faster)
+* **SSE3SLOW** (SSE3 is supported, but usually not faster)
+* **ATOM** (Atom processor, some SSSE3 instructions are slower)
+* **Cache line** (Probable size of a cache line).
+* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
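+
+A minimal cycle-count sketch using `RTCounter()`, which returns 0 when
+RDTSCP is unsupported (the workload comment is a placeholder):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid"
+)
+
+func main() {
+	start := cpuid.CPU.RTCounter()
+	// ... workload to measure ...
+	fmt.Println("cycles elapsed:", cpuid.CPU.RTCounter()-start)
+}
+```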
+
+## CPU Vendor/VM
+* **Intel**
+* **AMD**
+* **VIA**
+* **Transmeta**
+* **NSC**
+* **KVM** (Kernel-based Virtual Machine)
+* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
+* **VMware**
+* **XenHVM**
+
+# installing
+
+```go get github.com/klauspost/cpuid```
+
+# example
+
+```Go
+package main
+
+import (
+ "fmt"
+ "github.com/klauspost/cpuid"
+)
+
+func main() {
+ // Print basic CPU information:
+ fmt.Println("Name:", cpuid.CPU.BrandName)
+ fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
+ fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
+ fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
+ fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
+ fmt.Println("Features:", cpuid.CPU.Features)
+ fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
+ fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
+ fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
+ fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
+ fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
+
+ // Test if we have a specific feature:
+ if cpuid.CPU.SSE() {
+ fmt.Println("We have Streaming SIMD Extensions")
+ }
+}
+```
+
+Sample output:
+```
+>go run main.go
+Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
+PhysicalCores: 2
+ThreadsPerCore: 2
+LogicalCores: 4
+Family 6 Model: 42
+Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
+Cacheline bytes: 64
+We have Streaming SIMD Extensions
+```
+
+# private package
+
+In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
+
+For this purpose all exports are removed, and functions and constants are lowercased.
+
+This is not the recommended way of using the library, but it is provided for convenience when using external packages is difficult.
+
+# license
+
+This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go
new file mode 100644
index 000000000..60c681bed
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/cpuid.go
@@ -0,0 +1,1040 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) is supported.
+//
+// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid
+
+import "strings"
+
+// Vendor is a representation of a CPU vendor.
+type Vendor int
+
+const (
+ Other Vendor = iota
+ Intel
+ AMD
+ VIA
+ Transmeta
+ NSC
+ KVM // Kernel-based Virtual Machine
+ MSVM // Microsoft Hyper-V or Windows Virtual PC
+ VMware
+ XenHVM
+)
+
+const (
+ CMOV = 1 << iota // i686 CMOV
+ NX // NX (No-Execute) bit
+ AMD3DNOW // AMD 3DNOW
+ AMD3DNOWEXT // AMD 3DNowExt
+ MMX // standard MMX
+ MMXEXT // SSE integer functions or AMD MMX ext
+ SSE // SSE functions
+ SSE2 // P4 SSE functions
+ SSE3 // Prescott SSE3 functions
+ SSSE3 // Conroe SSSE3 functions
+ SSE4 // Penryn SSE4.1 functions
+ SSE4A // AMD Barcelona microarchitecture SSE4a instructions
+ SSE42 // Nehalem SSE4.2 functions
+ AVX // AVX functions
+ AVX2 // AVX2 functions
+ FMA3 // Intel FMA 3
+ FMA4 // Bulldozer FMA4 functions
+ XOP // Bulldozer XOP functions
+ F16C // Half-precision floating-point conversion
+ BMI1 // Bit Manipulation Instruction Set 1
+ BMI2 // Bit Manipulation Instruction Set 2
+ TBM // AMD Trailing Bit Manipulation
+ LZCNT // LZCNT instruction
+ POPCNT // POPCNT instruction
+ AESNI // Advanced Encryption Standard New Instructions
+ CLMUL // Carry-less Multiplication
+ HTT // Hyperthreading (enabled)
+ HLE // Hardware Lock Elision
+ RTM // Restricted Transactional Memory
+ RDRAND // RDRAND instruction is available
+ RDSEED // RDSEED instruction is available
+ ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+ SHA // Intel SHA Extensions
+ AVX512F // AVX-512 Foundation
+ AVX512DQ // AVX-512 Doubleword and Quadword Instructions
+ AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
+ AVX512PF // AVX-512 Prefetch Instructions
+ AVX512ER // AVX-512 Exponential and Reciprocal Instructions
+ AVX512CD // AVX-512 Conflict Detection Instructions
+ AVX512BW // AVX-512 Byte and Word Instructions
+ AVX512VL // AVX-512 Vector Length Extensions
+ AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
+ MPX // Intel MPX (Memory Protection Extensions)
+ ERMS // Enhanced REP MOVSB/STOSB
+ RDTSCP // RDTSCP Instruction
+ CX16 // CMPXCHG16B Instruction
+ SGX // Software Guard Extensions
+ IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+ STIBP // Single Thread Indirect Branch Predictors
+
+ // Performance indicators
+ SSE2SLOW // SSE2 is supported, but usually not faster
+ SSE3SLOW // SSE3 is supported, but usually not faster
+ ATOM // Atom processor, some SSSE3 instructions are slower
+)
+
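+// Each feature is a single bit in CPUInfo.Features; for example,
+// CPU.Features&AVX2 != 0 (or the AVX2() helper below) reports AVX2 support.
+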
+var flagNames = map[Flags]string{
+ CMOV: "CMOV", // i686 CMOV
+ NX: "NX", // NX (No-Execute) bit
+ AMD3DNOW: "AMD3DNOW", // AMD 3DNOW
+ AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt
+ MMX: "MMX", // Standard MMX
+ MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext
+ SSE: "SSE", // SSE functions
+ SSE2: "SSE2", // P4 SSE2 functions
+ SSE3: "SSE3", // Prescott SSE3 functions
+ SSSE3: "SSSE3", // Conroe SSSE3 functions
+ SSE4: "SSE4.1", // Penryn SSE4.1 functions
+ SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions
+ SSE42: "SSE4.2", // Nehalem SSE4.2 functions
+ AVX: "AVX", // AVX functions
+	AVX2:        "AVX2",        // AVX2 functions
+ FMA3: "FMA3", // Intel FMA 3
+ FMA4: "FMA4", // Bulldozer FMA4 functions
+ XOP: "XOP", // Bulldozer XOP functions
+ F16C: "F16C", // Half-precision floating-point conversion
+ BMI1: "BMI1", // Bit Manipulation Instruction Set 1
+ BMI2: "BMI2", // Bit Manipulation Instruction Set 2
+ TBM: "TBM", // AMD Trailing Bit Manipulation
+ LZCNT: "LZCNT", // LZCNT instruction
+ POPCNT: "POPCNT", // POPCNT instruction
+ AESNI: "AESNI", // Advanced Encryption Standard New Instructions
+ CLMUL: "CLMUL", // Carry-less Multiplication
+ HTT: "HTT", // Hyperthreading (enabled)
+ HLE: "HLE", // Hardware Lock Elision
+ RTM: "RTM", // Restricted Transactional Memory
+ RDRAND: "RDRAND", // RDRAND instruction is available
+ RDSEED: "RDSEED", // RDSEED instruction is available
+ ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+ SHA: "SHA", // Intel SHA Extensions
+ AVX512F: "AVX512F", // AVX-512 Foundation
+ AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions
+ AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions
+ AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions
+ AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions
+ AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions
+ AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions
+ AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions
+ AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions
+ MPX: "MPX", // Intel MPX (Memory Protection Extensions)
+ ERMS: "ERMS", // Enhanced REP MOVSB/STOSB
+ RDTSCP: "RDTSCP", // RDTSCP Instruction
+ CX16: "CX16", // CMPXCHG16B Instruction
+ SGX: "SGX", // Software Guard Extensions
+ IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier
+ STIBP: "STIBP", // Single Thread Indirect Branch Predictors
+
+ // Performance indicators
+ SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster
+ SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster
+ ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower
+
+}
+
+// CPUInfo contains information about the detected system CPU.
+type CPUInfo struct {
+ BrandName string // Brand name reported by the CPU
+ VendorID Vendor // Comparable CPU vendor ID
+ Features Flags // Features of the CPU
+ PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
+ ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
+ LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
+ Family int // CPU family number
+ Model int // CPU model number
+ CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
+ Cache struct {
+ L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
+ L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
+ L2 int // L2 Cache (per core or shared). Will be -1 if undetected
+		L3 int // L3 Cache (per core or shared). Will be -1 if undetected
+ }
+ SGX SGXSupport
+ maxFunc uint32
+ maxExFunc uint32
+}
+
+var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
+var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+var xgetbv func(index uint32) (eax, edx uint32)
+var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
+
+// CPU contains information about the CPU as detected on startup,
+// or when Detect last was called.
+//
+// Use this as the primary entry point to the data; detection has
+// already run, so queries against it are cheap.
+var CPU CPUInfo
+
+func init() {
+ initCPU()
+ Detect()
+}
+
+// Detect will re-detect current CPU info.
+// This will replace the content of the exported CPU variable.
+//
+// Unless you expect the CPU to change while you are running your program
+// you should not need to call this function.
+// If you call this, you must ensure that no other goroutine is accessing the
+// exported CPU variable.
+func Detect() {
+ CPU.maxFunc = maxFunctionID()
+ CPU.maxExFunc = maxExtendedFunction()
+ CPU.BrandName = brandName()
+ CPU.CacheLine = cacheLine()
+ CPU.Family, CPU.Model = familyModel()
+ CPU.Features = support()
+ CPU.SGX = hasSGX(CPU.Features&SGX != 0)
+ CPU.ThreadsPerCore = threadsPerCore()
+ CPU.LogicalCores = logicalCores()
+ CPU.PhysicalCores = physicalCores()
+ CPU.VendorID = vendorID()
+ CPU.cacheSize()
+}
+
+// Generated here: http://play.golang.org/p/BxFH2Gdc0G
+
+// Cmov indicates support of CMOV instructions
+func (c CPUInfo) Cmov() bool {
+ return c.Features&CMOV != 0
+}
+
+// Amd3dnow indicates support of AMD 3DNOW! instructions
+func (c CPUInfo) Amd3dnow() bool {
+ return c.Features&AMD3DNOW != 0
+}
+
+// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
+func (c CPUInfo) Amd3dnowExt() bool {
+ return c.Features&AMD3DNOWEXT != 0
+}
+
+// MMX indicates support of MMX instructions
+func (c CPUInfo) MMX() bool {
+ return c.Features&MMX != 0
+}
+
+// MMXExt indicates support of MMXEXT instructions
+// (SSE integer functions or AMD MMX ext)
+func (c CPUInfo) MMXExt() bool {
+ return c.Features&MMXEXT != 0
+}
+
+// SSE indicates support of SSE instructions
+func (c CPUInfo) SSE() bool {
+ return c.Features&SSE != 0
+}
+
+// SSE2 indicates support of SSE 2 instructions
+func (c CPUInfo) SSE2() bool {
+ return c.Features&SSE2 != 0
+}
+
+// SSE3 indicates support of SSE 3 instructions
+func (c CPUInfo) SSE3() bool {
+ return c.Features&SSE3 != 0
+}
+
+// SSSE3 indicates support of SSSE 3 instructions
+func (c CPUInfo) SSSE3() bool {
+ return c.Features&SSSE3 != 0
+}
+
+// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
+func (c CPUInfo) SSE4() bool {
+ return c.Features&SSE4 != 0
+}
+
+// SSE42 indicates support of SSE4.2 instructions
+func (c CPUInfo) SSE42() bool {
+ return c.Features&SSE42 != 0
+}
+
+// AVX indicates support of AVX instructions
+// and operating system support of AVX instructions
+func (c CPUInfo) AVX() bool {
+ return c.Features&AVX != 0
+}
+
+// AVX2 indicates support of AVX2 instructions
+func (c CPUInfo) AVX2() bool {
+ return c.Features&AVX2 != 0
+}
+
+// FMA3 indicates support of FMA3 instructions
+func (c CPUInfo) FMA3() bool {
+ return c.Features&FMA3 != 0
+}
+
+// FMA4 indicates support of FMA4 instructions
+func (c CPUInfo) FMA4() bool {
+ return c.Features&FMA4 != 0
+}
+
+// XOP indicates support of XOP instructions
+func (c CPUInfo) XOP() bool {
+ return c.Features&XOP != 0
+}
+
+// F16C indicates support of F16C instructions
+func (c CPUInfo) F16C() bool {
+ return c.Features&F16C != 0
+}
+
+// BMI1 indicates support of BMI1 instructions
+func (c CPUInfo) BMI1() bool {
+ return c.Features&BMI1 != 0
+}
+
+// BMI2 indicates support of BMI2 instructions
+func (c CPUInfo) BMI2() bool {
+ return c.Features&BMI2 != 0
+}
+
+// TBM indicates support of TBM instructions
+// (AMD Trailing Bit Manipulation)
+func (c CPUInfo) TBM() bool {
+ return c.Features&TBM != 0
+}
+
+// Lzcnt indicates support of LZCNT instruction
+func (c CPUInfo) Lzcnt() bool {
+ return c.Features&LZCNT != 0
+}
+
+// Popcnt indicates support of POPCNT instruction
+func (c CPUInfo) Popcnt() bool {
+ return c.Features&POPCNT != 0
+}
+
+// HTT indicates the processor has Hyperthreading enabled
+func (c CPUInfo) HTT() bool {
+ return c.Features&HTT != 0
+}
+
+// SSE2Slow indicates that SSE2 may be slow on this processor
+func (c CPUInfo) SSE2Slow() bool {
+ return c.Features&SSE2SLOW != 0
+}
+
+// SSE3Slow indicates that SSE3 may be slow on this processor
+func (c CPUInfo) SSE3Slow() bool {
+ return c.Features&SSE3SLOW != 0
+}
+
+// AesNi indicates support of AES-NI instructions
+// (Advanced Encryption Standard New Instructions)
+func (c CPUInfo) AesNi() bool {
+ return c.Features&AESNI != 0
+}
+
+// Clmul indicates support of CLMUL instructions
+// (Carry-less Multiplication)
+func (c CPUInfo) Clmul() bool {
+ return c.Features&CLMUL != 0
+}
+
+// NX indicates support of NX (No-Execute) bit
+func (c CPUInfo) NX() bool {
+ return c.Features&NX != 0
+}
+
+// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
+func (c CPUInfo) SSE4A() bool {
+ return c.Features&SSE4A != 0
+}
+
+// HLE indicates support of Hardware Lock Elision
+func (c CPUInfo) HLE() bool {
+ return c.Features&HLE != 0
+}
+
+// RTM indicates support of Restricted Transactional Memory
+func (c CPUInfo) RTM() bool {
+ return c.Features&RTM != 0
+}
+
+// Rdrand indicates support of RDRAND instruction is available
+func (c CPUInfo) Rdrand() bool {
+ return c.Features&RDRAND != 0
+}
+
+// Rdseed indicates support of RDSEED instruction is available
+func (c CPUInfo) Rdseed() bool {
+ return c.Features&RDSEED != 0
+}
+
+// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+func (c CPUInfo) ADX() bool {
+ return c.Features&ADX != 0
+}
+
+// SHA indicates support of Intel SHA Extensions
+func (c CPUInfo) SHA() bool {
+ return c.Features&SHA != 0
+}
+
+// AVX512F indicates support of AVX-512 Foundation
+func (c CPUInfo) AVX512F() bool {
+ return c.Features&AVX512F != 0
+}
+
+// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
+func (c CPUInfo) AVX512DQ() bool {
+ return c.Features&AVX512DQ != 0
+}
+
+// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
+func (c CPUInfo) AVX512IFMA() bool {
+ return c.Features&AVX512IFMA != 0
+}
+
+// AVX512PF indicates support of AVX-512 Prefetch Instructions
+func (c CPUInfo) AVX512PF() bool {
+ return c.Features&AVX512PF != 0
+}
+
+// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
+func (c CPUInfo) AVX512ER() bool {
+ return c.Features&AVX512ER != 0
+}
+
+// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
+func (c CPUInfo) AVX512CD() bool {
+ return c.Features&AVX512CD != 0
+}
+
+// AVX512BW indicates support of AVX-512 Byte and Word Instructions
+func (c CPUInfo) AVX512BW() bool {
+ return c.Features&AVX512BW != 0
+}
+
+// AVX512VL indicates support of AVX-512 Vector Length Extensions
+func (c CPUInfo) AVX512VL() bool {
+ return c.Features&AVX512VL != 0
+}
+
+// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
+func (c CPUInfo) AVX512VBMI() bool {
+ return c.Features&AVX512VBMI != 0
+}
+
+// MPX indicates support of Intel MPX (Memory Protection Extensions)
+func (c CPUInfo) MPX() bool {
+ return c.Features&MPX != 0
+}
+
+// ERMS indicates support of Enhanced REP MOVSB/STOSB
+func (c CPUInfo) ERMS() bool {
+ return c.Features&ERMS != 0
+}
+
+// RDTSCP Instruction is available.
+func (c CPUInfo) RDTSCP() bool {
+ return c.Features&RDTSCP != 0
+}
+
+// CX16 indicates if CMPXCHG16B instruction is available.
+func (c CPUInfo) CX16() bool {
+ return c.Features&CX16 != 0
+}
+
+// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection.
+// TSX reports true only when both are supported.
+func (c CPUInfo) TSX() bool {
+ return c.Features&(HLE|RTM) == HLE|RTM
+}
+
+// Atom indicates an Atom processor
+func (c CPUInfo) Atom() bool {
+ return c.Features&ATOM != 0
+}
+
+// Intel returns true if vendor is recognized as Intel
+func (c CPUInfo) Intel() bool {
+ return c.VendorID == Intel
+}
+
+// AMD returns true if vendor is recognized as AMD
+func (c CPUInfo) AMD() bool {
+ return c.VendorID == AMD
+}
+
+// Transmeta returns true if vendor is recognized as Transmeta
+func (c CPUInfo) Transmeta() bool {
+ return c.VendorID == Transmeta
+}
+
+// NSC returns true if vendor is recognized as National Semiconductor
+func (c CPUInfo) NSC() bool {
+ return c.VendorID == NSC
+}
+
+// VIA returns true if vendor is recognized as VIA
+func (c CPUInfo) VIA() bool {
+ return c.VendorID == VIA
+}
+
+// RTCounter returns the 64-bit time-stamp counter.
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+ if !c.RDTSCP() {
+ return 0
+ }
+ a, _, _, d := rdtscpAsm()
+ return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+ if !c.RDTSCP() {
+ return 0
+ }
+ _, _, ecx, _ := rdtscpAsm()
+ return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+ if c.maxFunc < 1 {
+ return -1
+ }
+ _, ebx, _, _ := cpuid(1)
+ return int(ebx >> 24)
+}
+
+// VM returns true if the CPU vendor ID indicates we are running in
+// a virtual machine. This is only a hint, and will very likely
+// have many false negatives.
+func (c CPUInfo) VM() bool {
+ switch c.VendorID {
+ case MSVM, KVM, VMware, XenHVM:
+ return true
+ }
+ return false
+}
+
+// Flags contains detected cpu features and characteristics
+type Flags uint64
+
+// String returns a string representation of the detected
+// CPU features.
+func (f Flags) String() string {
+ return strings.Join(f.Strings(), ",")
+}
+
+// Strings returns an array of the detected features.
+func (f Flags) Strings() []string {
+ s := support()
+ r := make([]string, 0, 20)
+ for i := uint(0); i < 64; i++ {
+ key := Flags(1 << i)
+ val := flagNames[key]
+ if s&key != 0 {
+ r = append(r, val)
+ }
+ }
+ return r
+}
+
+func maxExtendedFunction() uint32 {
+ eax, _, _, _ := cpuid(0x80000000)
+ return eax
+}
+
+func maxFunctionID() uint32 {
+ a, _, _, _ := cpuid(0)
+ return a
+}
+
+func brandName() string {
+ if maxExtendedFunction() >= 0x80000004 {
+ v := make([]uint32, 0, 48)
+ for i := uint32(0); i < 3; i++ {
+ a, b, c, d := cpuid(0x80000002 + i)
+ v = append(v, a, b, c, d)
+ }
+ return strings.Trim(string(valAsString(v...)), " ")
+ }
+ return "unknown"
+}
+
+func threadsPerCore() int {
+ mfi := maxFunctionID()
+ if mfi < 0x4 || vendorID() != Intel {
+ return 1
+ }
+
+ if mfi < 0xb {
+ _, b, _, d := cpuid(1)
+ if (d & (1 << 28)) != 0 {
+ // v will contain logical core count
+ v := (b >> 16) & 255
+ if v > 1 {
+ a4, _, _, _ := cpuid(4)
+ // physical cores
+ v2 := (a4 >> 26) + 1
+ if v2 > 0 {
+ return int(v) / int(v2)
+ }
+ }
+ }
+ return 1
+ }
+ _, b, _, _ := cpuidex(0xb, 0)
+ if b&0xffff == 0 {
+ return 1
+ }
+ return int(b & 0xffff)
+}
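+
+// Illustrative reading of the leaf 0xb path above: on a hyper-threaded Intel
+// core, cpuidex(0xb, 0) reports 2 logical processors per core in ebx[15:0],
+// so threadsPerCore returns 2; with Hyper-Threading disabled it reports 1.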
+
+func logicalCores() int {
+ mfi := maxFunctionID()
+ switch vendorID() {
+ case Intel:
+ // Use this on old Intel processors
+ if mfi < 0xb {
+ if mfi < 1 {
+ return 0
+ }
+ // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
+ // that can be assigned to logical processors in a physical package.
+ // The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
+ _, ebx, _, _ := cpuid(1)
+ logical := (ebx >> 16) & 0xff
+ return int(logical)
+ }
+ _, b, _, _ := cpuidex(0xb, 1)
+ return int(b & 0xffff)
+ case AMD:
+ _, b, _, _ := cpuid(1)
+ return int((b >> 16) & 0xff)
+ default:
+ return 0
+ }
+}
+
+func familyModel() (int, int) {
+ if maxFunctionID() < 0x1 {
+ return 0, 0
+ }
+ eax, _, _, _ := cpuid(1)
+ family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+ model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+ return int(family), int(model)
+}
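+
+// Worked example (illustrative): for eax = 0x000306A9, the base family
+// (eax>>8)&0xf = 6 plus extended family 0 gives family 6, and the base model
+// 0xA plus the extended model bits 0x30 gives model 0x3A, i.e. family 6,
+// model 58.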
+
+func physicalCores() int {
+ switch vendorID() {
+ case Intel:
+ return logicalCores() / threadsPerCore()
+ case AMD:
+ if maxExtendedFunction() >= 0x80000008 {
+ _, _, c, _ := cpuid(0x80000008)
+ return int(c&0xff) + 1
+ }
+ }
+ return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]Vendor{
+ "AMDisbetter!": AMD,
+ "AuthenticAMD": AMD,
+ "CentaurHauls": VIA,
+ "GenuineIntel": Intel,
+ "TransmetaCPU": Transmeta,
+ "GenuineTMx86": Transmeta,
+ "Geode by NSC": NSC,
+ "VIA VIA VIA ": VIA,
+ "KVMKVMKVMKVM": KVM,
+ "Microsoft Hv": MSVM,
+ "VMwareVMware": VMware,
+ "XenVMMXenVMM": XenHVM,
+}
+
+func vendorID() Vendor {
+ _, b, c, d := cpuid(0)
+ v := valAsString(b, d, c)
+ vend, ok := vendorMapping[string(v)]
+ if !ok {
+ return Other
+ }
+ return vend
+}
+
+func cacheLine() int {
+ if maxFunctionID() < 0x1 {
+ return 0
+ }
+
+ _, ebx, _, _ := cpuid(1)
+ cache := (ebx & 0xff00) >> 5 // CLFLUSH line size (quadwords) * 8 = bytes
+ if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+ _, _, ecx, _ := cpuid(0x80000006)
+ cache = ecx & 0xff // cacheline size
+ }
+ // TODO: Read from Cache and TLB Information
+ return int(cache)
+}
+
+func (c *CPUInfo) cacheSize() {
+ c.Cache.L1D = -1
+ c.Cache.L1I = -1
+ c.Cache.L2 = -1
+ c.Cache.L3 = -1
+ vendor := vendorID()
+ switch vendor {
+ case Intel:
+ if maxFunctionID() < 4 {
+ return
+ }
+ for i := uint32(0); ; i++ {
+ eax, ebx, ecx, _ := cpuidex(4, i)
+ cacheType := eax & 15
+ if cacheType == 0 {
+ break
+ }
+ cacheLevel := (eax >> 5) & 7
+ coherency := int(ebx&0xfff) + 1
+ partitions := int((ebx>>12)&0x3ff) + 1
+ associativity := int((ebx>>22)&0x3ff) + 1
+ sets := int(ecx) + 1
+ size := associativity * partitions * coherency * sets
+ switch cacheLevel {
+ case 1:
+ if cacheType == 1 {
+ // 1 = Data Cache
+ c.Cache.L1D = size
+ } else if cacheType == 2 {
+ // 2 = Instruction Cache
+ c.Cache.L1I = size
+ } else {
+ if c.Cache.L1D < 0 {
+ c.Cache.L1D = size
+ }
+ if c.Cache.L1I < 0 {
+ c.Cache.L1I = size
+ }
+ }
+ case 2:
+ c.Cache.L2 = size
+ case 3:
+ c.Cache.L3 = size
+ }
+ }
+ case AMD:
+ // Untested.
+ if maxExtendedFunction() < 0x80000005 {
+ return
+ }
+ _, _, ecx, edx := cpuid(0x80000005)
+ c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
+ c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
+
+ if maxExtendedFunction() < 0x80000006 {
+ return
+ }
+ _, _, ecx, _ = cpuid(0x80000006)
+ c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+ }
+
+ return
+}
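+
+// Worked example of the leaf 4 math above (hypothetical values): an 8-way
+// cache with 1 partition, 64-byte lines (coherency) and 64 sets gives
+// size = 8 * 1 * 64 * 64 = 32768 bytes, i.e. a 32 KiB cache.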
+
+type SGXSupport struct {
+ Available bool
+ SGX1Supported bool
+ SGX2Supported bool
+ MaxEnclaveSizeNot64 int64
+ MaxEnclaveSize64 int64
+}
+
+func hasSGX(available bool) (rval SGXSupport) {
+ rval.Available = available
+
+ if !available {
+ return
+ }
+
+ a, _, _, d := cpuidex(0x12, 0)
+ rval.SGX1Supported = a&0x01 != 0
+ rval.SGX2Supported = a&0x02 != 0
+ rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2
+ rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
+
+ return
+}
+
+func support() Flags {
+ mfi := maxFunctionID()
+ vend := vendorID()
+ if mfi < 0x1 {
+ return 0
+ }
+ rval := uint64(0)
+ _, _, c, d := cpuid(1)
+ if (d & (1 << 15)) != 0 {
+ rval |= CMOV
+ }
+ if (d & (1 << 23)) != 0 {
+ rval |= MMX
+ }
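+ // Bit 25 of EDX is the SSE flag; SSE also implies AMD's extended MMX
+ // instructions, so the same bit deliberately sets both MMXEXT and SSE.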
+ if (d & (1 << 25)) != 0 {
+ rval |= MMXEXT
+ }
+ if (d & (1 << 25)) != 0 {
+ rval |= SSE
+ }
+ if (d & (1 << 26)) != 0 {
+ rval |= SSE2
+ }
+ if (c & 1) != 0 {
+ rval |= SSE3
+ }
+ if (c & 0x00000200) != 0 {
+ rval |= SSSE3
+ }
+ if (c & 0x00080000) != 0 {
+ rval |= SSE4
+ }
+ if (c & 0x00100000) != 0 {
+ rval |= SSE42
+ }
+ if (c & (1 << 25)) != 0 {
+ rval |= AESNI
+ }
+ if (c & (1 << 1)) != 0 {
+ rval |= CLMUL
+ }
+ if c&(1<<23) != 0 {
+ rval |= POPCNT
+ }
+ if c&(1<<30) != 0 {
+ rval |= RDRAND
+ }
+ if c&(1<<29) != 0 {
+ rval |= F16C
+ }
+ if c&(1<<13) != 0 {
+ rval |= CX16
+ }
+ if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
+ if threadsPerCore() > 1 {
+ rval |= HTT
+ }
+ }
+
+ // Check XGETBV, OSXSAVE and AVX bits
+ if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 {
+ // Check for OS support
+ eax, _ := xgetbv(0)
+ if (eax & 0x6) == 0x6 {
+ rval |= AVX
+ if (c & 0x00001000) != 0 {
+ rval |= FMA3
+ }
+ }
+ }
+
+ // Check AVX2. AVX2 requires OS support, but BMI1/2 don't.
+ if mfi >= 7 {
+ _, ebx, ecx, edx := cpuidex(7, 0)
+ if (rval&AVX) != 0 && (ebx&0x00000020) != 0 {
+ rval |= AVX2
+ }
+ if (ebx & 0x00000008) != 0 {
+ rval |= BMI1
+ if (ebx & 0x00000100) != 0 {
+ rval |= BMI2
+ }
+ }
+ if ebx&(1<<2) != 0 {
+ rval |= SGX
+ }
+ if ebx&(1<<4) != 0 {
+ rval |= HLE
+ }
+ if ebx&(1<<9) != 0 {
+ rval |= ERMS
+ }
+ if ebx&(1<<11) != 0 {
+ rval |= RTM
+ }
+ if ebx&(1<<14) != 0 {
+ rval |= MPX
+ }
+ if ebx&(1<<18) != 0 {
+ rval |= RDSEED
+ }
+ if ebx&(1<<19) != 0 {
+ rval |= ADX
+ }
+ if ebx&(1<<29) != 0 {
+ rval |= SHA
+ }
+ if edx&(1<<26) != 0 {
+ rval |= IBPB
+ }
+ if edx&(1<<27) != 0 {
+ rval |= STIBP
+ }
+
+ // Only detect AVX-512 features if XGETBV is supported
+ if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
+ // Check for OS support
+ eax, _ := xgetbv(0)
+
+ // Verify that XCR0[7:5] = '111b' (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
+ // ZMM16-ZMM31 state are enabled by OS)
+ // and that XCR0[2:1] = '11b' (XMM state and YMM state are enabled by OS).
+ if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 {
+ if ebx&(1<<16) != 0 {
+ rval |= AVX512F
+ }
+ if ebx&(1<<17) != 0 {
+ rval |= AVX512DQ
+ }
+ if ebx&(1<<21) != 0 {
+ rval |= AVX512IFMA
+ }
+ if ebx&(1<<26) != 0 {
+ rval |= AVX512PF
+ }
+ if ebx&(1<<27) != 0 {
+ rval |= AVX512ER
+ }
+ if ebx&(1<<28) != 0 {
+ rval |= AVX512CD
+ }
+ if ebx&(1<<30) != 0 {
+ rval |= AVX512BW
+ }
+ if ebx&(1<<31) != 0 {
+ rval |= AVX512VL
+ }
+ // ecx
+ if ecx&(1<<1) != 0 {
+ rval |= AVX512VBMI
+ }
+ }
+ }
+ }
+
+ if maxExtendedFunction() >= 0x80000001 {
+ _, _, c, d := cpuid(0x80000001)
+ if (c & (1 << 5)) != 0 {
+ rval |= LZCNT
+ rval |= POPCNT
+ }
+ if (d & (1 << 31)) != 0 {
+ rval |= AMD3DNOW
+ }
+ if (d & (1 << 30)) != 0 {
+ rval |= AMD3DNOWEXT
+ }
+ if (d & (1 << 23)) != 0 {
+ rval |= MMX
+ }
+ if (d & (1 << 22)) != 0 {
+ rval |= MMXEXT
+ }
+ if (c & (1 << 6)) != 0 {
+ rval |= SSE4A
+ }
+ if d&(1<<20) != 0 {
+ rval |= NX
+ }
+ if d&(1<<27) != 0 {
+ rval |= RDTSCP
+ }
+
+ /* Allow for selectively disabling SSE2 functions on AMD processors
+ with SSE2 support but not SSE4a. This includes Athlon64, some
+ Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
+ than SSE2 often enough to utilize this special-case flag.
+ AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
+ so that SSE2 is used unless explicitly disabled by checking
+ AV_CPU_FLAG_SSE2SLOW. */
+ if vendorID() != Intel &&
+ rval&SSE2 != 0 && (c&0x00000040) == 0 {
+ rval |= SSE2SLOW
+ }
+
+ /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
+ * used unless the OS has AVX support. */
+ if (rval & AVX) != 0 {
+ if (c & 0x00000800) != 0 {
+ rval |= XOP
+ }
+ if (c & 0x00010000) != 0 {
+ rval |= FMA4
+ }
+ }
+
+ if vendorID() == Intel {
+ family, model := familyModel()
+ if family == 6 && (model == 9 || model == 13 || model == 14) {
+ /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
+ * 6/14 (core1 "yonah") theoretically support sse2, but it's
+ * usually slower than mmx. */
+ if (rval & SSE2) != 0 {
+ rval |= SSE2SLOW
+ }
+ if (rval & SSE3) != 0 {
+ rval |= SSE3SLOW
+ }
+ }
+ /* The Atom processor has SSSE3 support, which is useful in many cases,
+ * but sometimes the SSSE3 version is slower than the SSE2 equivalent
+ * on the Atom, but is generally faster on other processors supporting
+ * SSSE3. This flag allows for selectively disabling certain SSSE3
+ * functions on the Atom. */
+ if family == 6 && model == 28 {
+ rval |= ATOM
+ }
+ }
+ }
+ return Flags(rval)
+}
+
+func valAsString(values ...uint32) []byte {
+ r := make([]byte, 4*len(values))
+ for i, v := range values {
+ dst := r[i*4:]
+ dst[0] = byte(v & 0xff)
+ dst[1] = byte((v >> 8) & 0xff)
+ dst[2] = byte((v >> 16) & 0xff)
+ dst[3] = byte((v >> 24) & 0xff)
+ switch {
+ case dst[0] == 0:
+ return r[:i*4]
+ case dst[1] == 0:
+ return r[:i*4+1]
+ case dst[2] == 0:
+ return r[:i*4+2]
+ case dst[3] == 0:
+ return r[:i*4+3]
+ }
+ }
+ return r
+}
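+
+// Example: cpuid(0) on an Intel CPU returns ebx=0x756e6547, edx=0x49656e69,
+// ecx=0x6c65746e; valAsString(b, d, c) decodes those little-endian words into
+// the bytes "GenuineIntel", which vendorID then looks up in vendorMapping.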
diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s
new file mode 100644
index 000000000..4d731711e
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/cpuid_386.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build 386,!gccgo
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+ XORL CX, CX
+ MOVL op+0(FP), AX
+ CPUID
+ MOVL AX, eax+4(FP)
+ MOVL BX, ebx+8(FP)
+ MOVL CX, ecx+12(FP)
+ MOVL DX, edx+16(FP)
+ RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+ MOVL op+0(FP), AX
+ MOVL op2+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func xgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+ MOVL index+0(FP), CX
+ BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+ MOVL AX, eax+4(FP)
+ MOVL DX, edx+8(FP)
+ RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+ BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+ MOVL AX, eax+0(FP)
+ MOVL BX, ebx+4(FP)
+ MOVL CX, ecx+8(FP)
+ MOVL DX, edx+12(FP)
+ RET
diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s
new file mode 100644
index 000000000..3c1d60e42
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build amd64,!gccgo
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+ XORQ CX, CX
+ MOVL op+0(FP), AX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+ MOVL op+0(FP), AX
+ MOVL op2+4(FP), CX
+ CPUID
+ MOVL AX, eax+8(FP)
+ MOVL BX, ebx+12(FP)
+ MOVL CX, ecx+16(FP)
+ MOVL DX, edx+20(FP)
+ RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+ MOVL index+0(FP), CX
+ BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+ MOVL AX, eax+8(FP)
+ MOVL DX, edx+12(FP)
+ RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+ BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+ MOVL AX, eax+0(FP)
+ MOVL BX, ebx+4(FP)
+ MOVL CX, ecx+8(FP)
+ MOVL DX, edx+12(FP)
+ RET
diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go
new file mode 100644
index 000000000..a5f04dd6d
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/detect_intel.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build 386,!gccgo amd64,!gccgo
+
+package cpuid
+
+func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+func asmXgetbv(index uint32) (eax, edx uint32)
+func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+
+func initCPU() {
+ cpuid = asmCpuid
+ cpuidex = asmCpuidex
+ xgetbv = asmXgetbv
+ rdtscpAsm = asmRdtscpAsm
+}
diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go
new file mode 100644
index 000000000..909c5d9a7
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/detect_ref.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build !amd64,!386 gccgo
+
+package cpuid
+
+func initCPU() {
+ cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
+ return 0, 0, 0, 0
+ }
+
+ cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
+ return 0, 0, 0, 0
+ }
+
+ xgetbv = func(index uint32) (eax, edx uint32) {
+ return 0, 0
+ }
+
+ rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
+ return 0, 0, 0, 0
+ }
+}
diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go
new file mode 100644
index 000000000..90e7a98d2
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/generate.go
@@ -0,0 +1,4 @@
+package cpuid
+
+//go:generate go run private-gen.go
+//go:generate gofmt -w ./private
diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/GO_LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE
new file mode 100644
index 000000000..2bdc0d751
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md
new file mode 100644
index 000000000..81000996c
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/README.md
@@ -0,0 +1,136 @@
+pgzip
+=====
+
+Go parallel gzip compression/decompression. This is a fully gzip-compatible drop-in replacement for "compress/gzip".
+
+This will split compression into blocks that are compressed in parallel.
+This can be useful for compressing large amounts of data. The output is a standard gzip file.
+
+The gzip decompression is modified so it decompresses ahead of the current reader.
+This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it.
+CRC calculation also takes place in a separate goroutine.
+
+You should only use this if you are (de)compressing large amounts of data,
+say **more than 1MB** at a time, otherwise you will not see any benefit,
+and it will likely be faster to use the internal gzip library
+or [this package](https://github.com/klauspost/compress).
+
+It is important to note that this library creates and reads *standard gzip files*.
+You do not have to match the compressor/decompressor to get the described speedups,
+and the gzip files are fully compatible with other gzip readers/writers.
+
+A Go variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf),
+which has the same feature, as well as support for seeking in the resulting file.
+The only drawback is a slightly bigger overhead compared to this package and to pure gzip.
+See a comparison below.
+
+[![GoDoc][1]][2] [![Build Status][3]][4]
+
+[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg
+[2]: https://godoc.org/github.com/klauspost/pgzip
+[3]: https://travis-ci.org/klauspost/pgzip.svg
+[4]: https://travis-ci.org/klauspost/pgzip
+
+Installation
+====
+```go get github.com/klauspost/pgzip/...```
+
+You might need to get/update the dependencies:
+
+```
+go get -u github.com/klauspost/compress
+go get -u github.com/klauspost/crc32
+```
+
+Usage
+====
+[Godoc Documentation](https://godoc.org/github.com/klauspost/pgzip)
+
+To use as a replacement for gzip, exchange
+
+```import "compress/gzip"```
+with
+```import gzip "github.com/klauspost/pgzip"```.
+
+# Changes
+
+* Oct 6, 2016: Fixed an issue if the destination writer returned an error.
+* Oct 6, 2016: Better buffer reuse, should now generate less garbage.
+* Oct 6, 2016: Output does not change based on write sizes.
+* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure.
+* Oct 9, 2015: Reduced allocations by ~35% by using sync.Pool. ~15% overall speedup.
+
+Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes.
+
+## Compression
+The simplest way to use this is to do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip).
+
+To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. The default is SetConcurrency(250000, 16), meaning blocks are split at 250000 bytes and up to 16 blocks can be in flight at once before the writer blocks.
+
+
+Example:
+```
+var b bytes.Buffer
+w := gzip.NewWriter(&b)
+w.SetConcurrency(100000, 10)
+w.Write([]byte("hello, world\n"))
+w.Close()
+```
+
+To get any performance gains, you should at least be compressing more than 1 megabyte of data at a time.
+
+You should use a block size of at least 100k, and at least as many blocks as the number of cores you would like to utilize; about twice that number of blocks is best.
+
+Another side effect is that it is likely to speed up your other code, since writes to the compressor only block if the compressor is already compressing the number of blocks you have specified. This also means you don't have to worry about buffering input to the compressor.
+
+## Decompression
+
+Decompression works similarly to compression. That means you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip).
+
+The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom block sizes. The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that are decoded ahead.
+
+See [Example on playground](http://play.golang.org/p/uHv1B5NbDh)
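+
+A minimal sketch of custom readahead (imports and error handling elided; `file` is assumed to be an open `io.Reader`):
+
+```
+r, err := pgzip.NewReaderN(file, 1<<20, 8) // ~1MB blocks, up to 8 decoded ahead
+if err != nil {
+	log.Fatal(err)
+}
+defer r.Close()
+_, err = io.Copy(os.Stdout, r)
+```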
+
+Performance
+====
+## Compression
+
+See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+
+Compression cost is usually about 0.2% with default settings with a block size of 250k.
+
+Example with GOMAXPROCS set to 8 (quad core with 8 hyperthreads):
+
+Content is [Matt Mahoney's 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6.
+
+Compressor | MB/sec | speedup | size | size overhead (lower=better)
+------------|----------|---------|------|---------
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 7.21MB/s | 1.0x | 4786608902 | 0%
+[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 10.98MB/s | 1.52x | 4781331645 | -0.11%
+[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 50.76MB/s | 7.04x | 4784121440 | -0.052%
+[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 38.65MB/s | 5.36x | 4924899484 | 2.889%
+[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 32.00MB/s | 4.44x | 4791226567 | 0.096%
+
+pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression) mode, that will allow compression at ~150MB per core per second, independent of the content.
+
+See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings.
+
+## Decompression
+
+The decompression speedup is there because it allows you to do other work while the decompression is taking place.
+
+In the example above, the numbers are as follows on a 4 CPU machine:
+
+Decompressor | Time | Speedup
+-------------|------|--------
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0%
+[pgzip](https://github.com/klauspost/pgzip) (golang) | 43.48s | 104%
+
+But wait, since gzip decompression is inherently single-threaded (aside from CRC calculation), how can it be more than 100% faster? Because pgzip, due to its design, also acts as a buffer. When using unbuffered gzip, you are also waiting for I/O while you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete.
+
+This is pretty much an optimal situation for pgzip, but it reflects most common use cases for CPU-intensive gzip usage.
+
+I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparison, since it can only decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf-compatible program, you can expect it to scale beyond 100%.
+
+# License
+This contains large portions of code from the go repository - see GO_LICENSE for more information. The changes are released under MIT License. See LICENSE for more information.
diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go
new file mode 100644
index 000000000..93efec714
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/gunzip.go
@@ -0,0 +1,573 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pgzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+//
+// This is a drop in replacement for "compress/gzip".
+// This will split compression into blocks that are compressed in parallel.
+// This can be useful for compressing big amounts of data.
+// The gzip decompression has not been modified, but remains in the package,
+// so you can use it as a complete replacement for "compress/gzip".
+//
+// See more at https://github.com/klauspost/pgzip
+package pgzip
+
+import (
+ "bufio"
+ "errors"
+ "hash"
+ "hash/crc32"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+)
+
+const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ flagText = 1 << 0
+ flagHdrCrc = 1 << 1
+ flagExtra = 1 << 2
+ flagName = 1 << 3
+ flagComment = 1 << 4
+)
+
+func makeReader(r io.Reader) flate.Reader {
+ if rr, ok := r.(flate.Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+var (
+ // ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+ ErrChecksum = errors.New("gzip: invalid checksum")
+ // ErrHeader is returned when reading GZIP data that has an invalid header.
+ ErrHeader = errors.New("gzip: invalid header")
+)
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+type Header struct {
+ Comment string // comment
+ Extra []byte // "extra data"
+ ModTime time.Time // modification time
+ Name string // file name
+ OS byte // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+ Header
+ r flate.Reader
+ decompressor io.ReadCloser
+ digest hash.Hash32
+ size uint32
+ flg byte
+ buf [512]byte
+ err error
+ closeErr chan error
+ multistream bool
+
+ readAhead chan read
+ roff int // read offset
+ current []byte
+ closeReader chan struct{}
+ lastBlock bool
+ blockSize int
+ blocks int
+
+ activeRA bool // Indication if readahead is active
+ mu sync.Mutex // Lock for above
+
+ blockPool chan []byte
+}
+
+type read struct {
+ b []byte
+ err error
+}
+
+// NewReader creates a new Reader reading the given reader.
+// The implementation buffers input and may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the Reader when done.
+func NewReader(r io.Reader) (*Reader, error) {
+ z := new(Reader)
+ z.blocks = defaultBlocks
+ z.blockSize = defaultBlockSize
+ z.r = makeReader(r)
+ z.digest = crc32.NewIEEE()
+ z.multistream = true
+ z.blockPool = make(chan []byte, z.blocks)
+ for i := 0; i < z.blocks; i++ {
+ z.blockPool <- make([]byte, z.blockSize)
+ }
+ if err := z.readHeader(true); err != nil {
+ return nil, err
+ }
+ return z, nil
+}
+
+// NewReaderN creates a new Reader reading the given reader.
+// The implementation buffers input and may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// With this you can control the approximate size of your blocks,
+// as well as how many blocks you want to have prefetched.
+//
+// The default values are blockSize = 250000, blocks = 16,
+// meaning up to 16 blocks of maximum 250000 bytes will be
+// prefetched.
+func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) {
+ z := new(Reader)
+ z.blocks = blocks
+ z.blockSize = blockSize
+ z.r = makeReader(r)
+ z.digest = crc32.NewIEEE()
+ z.multistream = true
+
+ // Account for too small values
+ if z.blocks <= 0 {
+ z.blocks = defaultBlocks
+ }
+ if z.blockSize <= 512 {
+ z.blockSize = defaultBlockSize
+ }
+ z.blockPool = make(chan []byte, z.blocks)
+ for i := 0; i < z.blocks; i++ {
+ z.blockPool <- make([]byte, z.blockSize)
+ }
+ if err := z.readHeader(true); err != nil {
+ return nil, err
+ }
+ return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+ z.killReadAhead()
+ z.r = makeReader(r)
+ z.digest = crc32.NewIEEE()
+ z.size = 0
+ z.err = nil
+ z.multistream = true
+
+ // Account for uninitialized values
+ if z.blocks <= 0 {
+ z.blocks = defaultBlocks
+ }
+ if z.blockSize <= 512 {
+ z.blockSize = defaultBlockSize
+ }
+
+ if z.blockPool == nil {
+ z.blockPool = make(chan []byte, z.blocks)
+ for i := 0; i < z.blocks; i++ {
+ z.blockPool <- make([]byte, z.blockSize)
+ }
+ }
+
+ return z.readHeader(true)
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+ z.multistream = ok
+}
+
+// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
+func get4(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
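+
+// For example, the bytes {0x1f, 0x8b, 0x08, 0x00} decode to 0x00088b1f:
+// the least-significant byte comes first.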
+
+func (z *Reader) readString() (string, error) {
+ var err error
+ needconv := false
+ for i := 0; ; i++ {
+ if i >= len(z.buf) {
+ return "", ErrHeader
+ }
+ z.buf[i], err = z.r.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if z.buf[i] > 0x7f {
+ needconv = true
+ }
+ if z.buf[i] == 0 {
+ // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+ if needconv {
+ s := make([]rune, 0, i)
+ for _, v := range z.buf[0:i] {
+ s = append(s, rune(v))
+ }
+ return string(s), nil
+ }
+ return string(z.buf[0:i]), nil
+ }
+ }
+}
+
+func (z *Reader) read2() (uint32, error) {
+ _, err := io.ReadFull(z.r, z.buf[0:2])
+ if err != nil {
+ return 0, err
+ }
+ return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil
+}
+
+func (z *Reader) readHeader(save bool) error {
+ z.killReadAhead()
+
+ _, err := io.ReadFull(z.r, z.buf[0:10])
+ if err != nil {
+ return err
+ }
+ if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+ return ErrHeader
+ }
+ z.flg = z.buf[3]
+ if save {
+ z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0)
+ // z.buf[8] is xfl, ignored
+ z.OS = z.buf[9]
+ }
+ z.digest.Reset()
+ z.digest.Write(z.buf[0:10])
+
+ if z.flg&flagExtra != 0 {
+ n, err := z.read2()
+ if err != nil {
+ return err
+ }
+ data := make([]byte, n)
+ if _, err = io.ReadFull(z.r, data); err != nil {
+ return err
+ }
+ if save {
+ z.Extra = data
+ }
+ }
+
+ var s string
+ if z.flg&flagName != 0 {
+ if s, err = z.readString(); err != nil {
+ return err
+ }
+ if save {
+ z.Name = s
+ }
+ }
+
+ if z.flg&flagComment != 0 {
+ if s, err = z.readString(); err != nil {
+ return err
+ }
+ if save {
+ z.Comment = s
+ }
+ }
+
+ if z.flg&flagHdrCrc != 0 {
+ n, err := z.read2()
+ if err != nil {
+ return err
+ }
+ sum := z.digest.Sum32() & 0xFFFF
+ if n != sum {
+ return ErrHeader
+ }
+ }
+
+ z.digest.Reset()
+ z.decompressor = flate.NewReader(z.r)
+ z.doReadAhead()
+ return nil
+}
+
+func (z *Reader) killReadAhead() error {
+ z.mu.Lock()
+ defer z.mu.Unlock()
+ if z.activeRA {
+ if z.closeReader != nil {
+ close(z.closeReader)
+ }
+
+ // Wait for decompressor to be closed and return error, if any.
+ e, ok := <-z.closeErr
+ z.activeRA = false
+ if !ok {
+ // Channel is closed, so if there was any error it has already been returned.
+ return nil
+ }
+ return e
+ }
+ return nil
+}
+
+// doReadAhead starts the readahead goroutine.
+// The goroutine exits on error (including io.EOF)
+// or when z.closeReader is closed.
+func (z *Reader) doReadAhead() {
+ z.mu.Lock()
+ defer z.mu.Unlock()
+ z.activeRA = true
+
+ if z.blocks <= 0 {
+ z.blocks = defaultBlocks
+ }
+ if z.blockSize <= 512 {
+ z.blockSize = defaultBlockSize
+ }
+ ra := make(chan read, z.blocks)
+ z.readAhead = ra
+ closeReader := make(chan struct{}, 0)
+ z.closeReader = closeReader
+ z.lastBlock = false
+ closeErr := make(chan error, 1)
+ z.closeErr = closeErr
+ z.size = 0
+ z.roff = 0
+ z.current = nil
+ decomp := z.decompressor
+
+ go func() {
+ defer func() {
+ closeErr <- decomp.Close()
+ close(closeErr)
+ close(ra)
+ }()
+
+ // We hold a local reference to digest, since
+ // it may be changed by Reset.
+ digest := z.digest
+ var wg sync.WaitGroup
+ for {
+ var buf []byte
+ select {
+ case buf = <-z.blockPool:
+ case <-closeReader:
+ return
+ }
+ buf = buf[0:z.blockSize]
+ // Try to fill the buffer
+ n, err := io.ReadFull(decomp, buf)
+ if err == io.ErrUnexpectedEOF {
+ if n > 0 {
+ err = nil
+ } else {
+ // If we got zero bytes, we need to establish if
+ // we reached end of stream or truncated stream.
+ _, err = decomp.Read([]byte{})
+ if err == io.EOF {
+ err = nil
+ }
+ }
+ }
+ if n < len(buf) {
+ buf = buf[0:n]
+ }
+ wg.Wait()
+ wg.Add(1)
+ go func() {
+ digest.Write(buf)
+ wg.Done()
+ }()
+ z.size += uint32(n)
+
+ // If we return any error, our digest must be ready
+ if err != nil {
+ wg.Wait()
+ }
+ select {
+ case z.readAhead <- read{b: buf, err: err}:
+ case <-closeReader:
+ // Sent on close, we don't care about the next results
+ return
+ }
+ if err != nil {
+ return
+ }
+ }
+ }()
+}
+
+func (z *Reader) Read(p []byte) (n int, err error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ for {
+ if len(z.current) == 0 && !z.lastBlock {
+ read := <-z.readAhead
+
+ if read.err != nil {
+ // If not nil, the reader will have exited
+ z.closeReader = nil
+
+ if read.err != io.EOF {
+ z.err = read.err
+ return
+ }
+ if read.err == io.EOF {
+ z.lastBlock = true
+ err = nil
+ }
+ }
+ z.current = read.b
+ z.roff = 0
+ }
+ avail := z.current[z.roff:]
+ if len(p) >= len(avail) {
+ // If len(p) >= len(current), return all content of current
+ n = copy(p, avail)
+ z.blockPool <- z.current
+ z.current = nil
+ if z.lastBlock {
+ err = io.EOF
+ break
+ }
+ } else {
+ // We copy as much as there is space for
+ n = copy(p, avail)
+ z.roff += n
+ }
+ return
+ }
+
+ // Finished file; check checksum + size.
+ if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+ z.err = err
+ return 0, err
+ }
+ crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
+ sum := z.digest.Sum32()
+ if sum != crc32 || isize != z.size {
+ z.err = ErrChecksum
+ return 0, z.err
+ }
+
+ // File is ok; should we attempt reading one more?
+ if !z.multistream {
+ return 0, io.EOF
+ }
+
+ // Is there another?
+ if err = z.readHeader(false); err != nil {
+ z.err = err
+ return
+ }
+
+ // Yes. Reset and read from it.
+ return z.Read(p)
+}
+
+func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ total := int64(0)
+ for {
+ if z.err != nil {
+ return total, z.err
+ }
+ // We write both to output and digest.
+ for {
+ // Read from input
+ read := <-z.readAhead
+ if read.err != nil {
+ // If not nil, the reader will have exited
+ z.closeReader = nil
+
+ if read.err != io.EOF {
+ z.err = read.err
+ return total, z.err
+ }
+ if read.err == io.EOF {
+ z.lastBlock = true
+ err = nil
+ }
+ }
+ // Write what we got
+ n, err := w.Write(read.b)
+ if n != len(read.b) {
+ return total, io.ErrShortWrite
+ }
+ total += int64(n)
+ if err != nil {
+ return total, err
+ }
+ // Put block back
+ z.blockPool <- read.b
+ if z.lastBlock {
+ break
+ }
+ }
+
+ // Finished file; check checksum + size.
+ if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+ z.err = err
+ return total, err
+ }
+ crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
+ sum := z.digest.Sum32()
+ if sum != crc32 || isize != z.size {
+ z.err = ErrChecksum
+ return total, z.err
+ }
+ // File is ok; should we attempt reading one more?
+ if !z.multistream {
+ return total, nil
+ }
+
+ // Is there another?
+ err = z.readHeader(false)
+ if err == io.EOF {
+ return total, nil
+ }
+ if err != nil {
+ z.err = err
+ return total, err
+ }
+ }
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+func (z *Reader) Close() error {
+ return z.killReadAhead()
+}
diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go
new file mode 100644
index 000000000..85d14e9cb
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/gzip.go
@@ -0,0 +1,501 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pgzip
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+)
+
+const (
+ defaultBlockSize = 256 << 10
+ tailSize = 16384
+ defaultBlocks = 16
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+ NoCompression = flate.NoCompression
+ BestSpeed = flate.BestSpeed
+ BestCompression = flate.BestCompression
+ DefaultCompression = flate.DefaultCompression
+ ConstantCompression = flate.ConstantCompression
+ HuffmanOnly = flate.HuffmanOnly
+)
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+ Header
+ w io.Writer
+ level int
+ wroteHeader bool
+ blockSize int
+ blocks int
+ currentBuffer []byte
+ prevTail []byte
+ digest hash.Hash32
+ size int
+ closed bool
+ buf [10]byte
+ errMu sync.RWMutex
+ err error
+ pushedErr chan struct{}
+ results chan result
+ dictFlatePool sync.Pool
+ dstPool sync.Pool
+ wg sync.WaitGroup
+}
+
+type result struct {
+ result chan []byte
+ notifyWritten chan struct{}
+}
+
+// Use SetConcurrency to fine-tune the concurrency level if needed.
+//
+// With this you can control the approximate size of your blocks,
+// as well as how many you want to be processing in parallel.
+//
+// The default is SetConcurrency(250000, 16),
+// meaning blocks are split at 250000 bytes and up to 16 blocks
+// can be processing at once before the writer blocks.
+func (z *Writer) SetConcurrency(blockSize, blocks int) error {
+ if blockSize <= tailSize {
+ return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize)
+ }
+ if blocks <= 0 {
+ return errors.New("gzip: blocks cannot be zero or less")
+ }
+ if blockSize == z.blockSize && blocks == z.blocks {
+ return nil
+ }
+ z.blockSize = blockSize
+ z.results = make(chan result, blocks)
+ z.blocks = blocks
+ z.dstPool = sync.Pool{New: func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) }}
+ return nil
+}
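+
+// Illustrative use (a sketch; assumes a runtime import and an out io.Writer,
+// errors elided):
+//
+//	w := pgzip.NewWriter(out)
+//	_ = w.SetConcurrency(512<<10, runtime.GOMAXPROCS(0)*2)
+//	defer w.Close()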
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write or Close. The Comment and Name header fields are
+// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO
+// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an
+// error on Write.
+func NewWriter(w io.Writer) *Writer {
+ z, _ := NewWriterLevel(w, DefaultCompression)
+ return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+ if level < ConstantCompression || level > BestCompression {
+ return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+ }
+ z := new(Writer)
+ z.SetConcurrency(defaultBlockSize, defaultBlocks)
+ z.init(w, level)
+ return z, nil
+}
+
+// This function must be used by goroutines to set an
+// error condition, since z.err access is restricted
+// to the caller's goroutine.
+func (z *Writer) pushError(err error) {
+ z.errMu.Lock()
+ if z.err != nil {
+ z.errMu.Unlock()
+ return
+ }
+ z.err = err
+ close(z.pushedErr)
+ z.errMu.Unlock()
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+ z.wg.Wait()
+ digest := z.digest
+ if digest != nil {
+ digest.Reset()
+ } else {
+ digest = crc32.NewIEEE()
+ }
+ z.Header = Header{OS: 255}
+ z.w = w
+ z.level = level
+ z.digest = digest
+ z.pushedErr = make(chan struct{}, 0)
+ z.results = make(chan result, z.blocks)
+ z.err = nil
+ z.closed = false
+ z.Comment = ""
+ z.Extra = nil
+ z.ModTime = time.Time{}
+ z.wroteHeader = false
+ z.currentBuffer = nil
+ z.buf = [10]byte{}
+ z.prevTail = nil
+ z.size = 0
+ if z.dictFlatePool.New == nil {
+ z.dictFlatePool.New = func() interface{} {
+ f, _ := flate.NewWriterDict(w, level, nil)
+ return f
+ }
+ }
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+ if z.results != nil && !z.closed {
+ close(z.results)
+ }
+ z.SetConcurrency(defaultBlockSize, defaultBlocks)
+ z.init(w, z.level)
+}
+
+// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
+func put2(p []byte, v uint16) {
+ p[0] = uint8(v >> 0)
+ p[1] = uint8(v >> 8)
+}
+
+func put4(p []byte, v uint32) {
+ p[0] = uint8(v >> 0)
+ p[1] = uint8(v >> 8)
+ p[2] = uint8(v >> 16)
+ p[3] = uint8(v >> 24)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+ if len(b) > 0xffff {
+ return errors.New("gzip.Write: Extra data is too large")
+ }
+ put2(z.buf[0:2], uint16(len(b)))
+ _, err := z.w.Write(z.buf[0:2])
+ if err != nil {
+ return err
+ }
+ _, err = z.w.Write(b)
+ return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+ // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+ needconv := false
+ for _, v := range s {
+ if v == 0 || v > 0xff {
+ return errors.New("gzip.Write: non-Latin-1 header string")
+ }
+ if v > 0x7f {
+ needconv = true
+ }
+ }
+ if needconv {
+ b := make([]byte, 0, len(s))
+ for _, v := range s {
+ b = append(b, byte(v))
+ }
+ _, err = z.w.Write(b)
+ } else {
+ _, err = io.WriteString(z.w, s)
+ }
+ if err != nil {
+ return err
+ }
+ // GZIP strings are NUL-terminated.
+ z.buf[0] = 0
+ _, err = z.w.Write(z.buf[0:1])
+ return err
+}
+
+// compressCurrent will compress the data currently buffered
+// This should only be called from the main writer/flush/closer
+func (z *Writer) compressCurrent(flush bool) {
+ r := result{}
+ r.result = make(chan []byte, 1)
+ r.notifyWritten = make(chan struct{}, 0)
+ select {
+ case z.results <- r:
+ case <-z.pushedErr:
+ return
+ }
+
+ // If block given is more than twice the block size, split it.
+ c := z.currentBuffer
+ if len(c) > z.blockSize*2 {
+ c = c[:z.blockSize]
+ z.wg.Add(1)
+ go z.compressBlock(c, z.prevTail, r, false)
+ z.prevTail = c[len(c)-tailSize:]
+ z.currentBuffer = z.currentBuffer[z.blockSize:]
+ z.compressCurrent(flush)
+ // Last one flushes if needed
+ return
+ }
+
+ z.wg.Add(1)
+ go z.compressBlock(c, z.prevTail, r, z.closed)
+ if len(c) > tailSize {
+ z.prevTail = c[len(c)-tailSize:]
+ } else {
+ z.prevTail = nil
+ }
+ z.currentBuffer = z.dstPool.Get().([]byte)
+ z.currentBuffer = z.currentBuffer[:0]
+
+ // Wait if flushing
+ if flush {
+ <-r.notifyWritten
+ }
+}
+
+// checkError returns an error if one has been set.
+// It cannot be used by functions that run in internal goroutines.
+func (z *Writer) checkError() error {
+ z.errMu.RLock()
+ err := z.err
+ z.errMu.RUnlock()
+ return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed to output until
+// the Writer is closed or Flush() is called.
+//
+// The function will return quickly if there are unused buffers.
+// The sent slice (p) is copied, and the caller is free to re-use the buffer
+// when the function returns.
+//
+// Errors that occur during compression will be reported later, and a nil error
+// does not signify that the compression succeeded (since it is most likely still running).
+// That means that the call that returns an error may not be the call that caused it.
+// Only the Flush and Close functions are guaranteed to return any errors up to that point.
+func (z *Writer) Write(p []byte) (int, error) {
+ if err := z.checkError(); err != nil {
+ return 0, err
+ }
+ // Write the GZIP header lazily.
+ if !z.wroteHeader {
+ z.wroteHeader = true
+ z.buf[0] = gzipID1
+ z.buf[1] = gzipID2
+ z.buf[2] = gzipDeflate
+ z.buf[3] = 0
+ if z.Extra != nil {
+ z.buf[3] |= 0x04
+ }
+ if z.Name != "" {
+ z.buf[3] |= 0x08
+ }
+ if z.Comment != "" {
+ z.buf[3] |= 0x10
+ }
+ put4(z.buf[4:8], uint32(z.ModTime.Unix()))
+ if z.level == BestCompression {
+ z.buf[8] = 2
+ } else if z.level == BestSpeed {
+ z.buf[8] = 4
+ } else {
+ z.buf[8] = 0
+ }
+ z.buf[9] = z.OS
+ var n int
+ var err error
+ n, err = z.w.Write(z.buf[0:10])
+ if err != nil {
+ z.pushError(err)
+ return n, err
+ }
+ if z.Extra != nil {
+ err = z.writeBytes(z.Extra)
+ if err != nil {
+ z.pushError(err)
+ return n, err
+ }
+ }
+ if z.Name != "" {
+ err = z.writeString(z.Name)
+ if err != nil {
+ z.pushError(err)
+ return n, err
+ }
+ }
+ if z.Comment != "" {
+ err = z.writeString(z.Comment)
+ if err != nil {
+ z.pushError(err)
+ return n, err
+ }
+ }
+ // Start receiving data from compressors
+ go func() {
+ listen := z.results
+ for {
+ r, ok := <-listen
+ // If closed, we are finished.
+ if !ok {
+ return
+ }
+ buf := <-r.result
+ n, err := z.w.Write(buf)
+ if err != nil {
+ z.pushError(err)
+ close(r.notifyWritten)
+ return
+ }
+ if n != len(buf) {
+ z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf)))
+ close(r.notifyWritten)
+ return
+ }
+ z.dstPool.Put(buf)
+ close(r.notifyWritten)
+ }
+ }()
+ z.currentBuffer = make([]byte, 0, z.blockSize)
+ }
+ q := p
+ for len(q) > 0 {
+ length := len(q)
+ if length+len(z.currentBuffer) > z.blockSize {
+ length = z.blockSize - len(z.currentBuffer)
+ }
+ z.digest.Write(q[:length])
+ z.currentBuffer = append(z.currentBuffer, q[:length]...)
+ if len(z.currentBuffer) >= z.blockSize {
+ z.compressCurrent(false)
+ if err := z.checkError(); err != nil {
+ return len(p) - len(q) - length, err
+ }
+ }
+ z.size += length
+ q = q[length:]
+ }
+ return len(p), z.checkError()
+}
+
+// compressBlock does three things:
+// Step 1: compress the buffer into a new buffer,
+// Step 2: send the compressed result to the result channel,
+// Step 3: close the result channel to indicate we are done.
+func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) {
+ defer func() {
+ close(r.result)
+ z.wg.Done()
+ }()
+ buf := z.dstPool.Get().([]byte)
+ dest := bytes.NewBuffer(buf[:0])
+
+ compressor := z.dictFlatePool.Get().(*flate.Writer)
+ compressor.ResetDict(dest, prevTail)
+ compressor.Write(p)
+
+ err := compressor.Flush()
+ if err != nil {
+ z.pushError(err)
+ return
+ }
+ if closed {
+ err = compressor.Close()
+ if err != nil {
+ z.pushError(err)
+ return
+ }
+ }
+ z.dictFlatePool.Put(compressor)
+ // Read back buffer
+ buf = dest.Bytes()
+ r.result <- buf
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+ if err := z.checkError(); err != nil {
+ return err
+ }
+ if z.closed {
+ return nil
+ }
+ if !z.wroteHeader {
+ _, err := z.Write(nil)
+ if err != nil {
+ return err
+ }
+ }
+ // We send current block to compression
+ z.compressCurrent(true)
+
+ return z.checkError()
+}
+
+// UncompressedSize will return the number of bytes written.
+// pgzip only, not a function in the official gzip package.
+func (z *Writer) UncompressedSize() int {
+ return z.size
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+ if err := z.checkError(); err != nil {
+ return err
+ }
+ if z.closed {
+ return nil
+ }
+
+ z.closed = true
+ if !z.wroteHeader {
+ z.Write(nil)
+ if err := z.checkError(); err != nil {
+ return err
+ }
+ }
+ z.compressCurrent(true)
+ if err := z.checkError(); err != nil {
+ return err
+ }
+ close(z.results)
+ put4(z.buf[0:4], z.digest.Sum32())
+ put4(z.buf[4:8], uint32(z.size))
+ _, err := z.w.Write(z.buf[0:8])
+ if err != nil {
+ z.pushError(err)
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE
index 0ea3ff81e..0ea3ff81e 100644
--- a/vendor/github.com/opencontainers/go-digest/LICENSE.code
+++ b/vendor/github.com/opencontainers/go-digest/LICENSE
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
index 0f5a04092..25aac3470 100644
--- a/vendor/github.com/opencontainers/go-digest/README.md
+++ b/vendor/github.com/opencontainers/go-digest/README.md
@@ -101,4 +101,4 @@ the various OCI projects).
# Copyright and license
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
index bdff42d92..8813bd26f 100644
--- a/vendor/github.com/opencontainers/go-digest/algorithm.go
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -19,6 +19,7 @@ import (
"fmt"
"hash"
"io"
+ "regexp"
)
// Algorithm identifies and implementation of a digester by an identifier.
@@ -28,9 +29,9 @@ type Algorithm string
// supported digest types
const (
- SHA256 Algorithm = "sha256" // sha256 with hex encoding
- SHA384 Algorithm = "sha384" // sha384 with hex encoding
- SHA512 Algorithm = "sha512" // sha512 with hex encoding
+ SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
+ SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
+ SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
// Canonical is the primary digest algorithm used with the distribution
// project. Other digests may be used but this one is the primary storage
@@ -50,6 +51,14 @@ var (
SHA384: crypto.SHA384,
SHA512: crypto.SHA512,
}
+
+ // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
+ // Note that upper-case A-F is disallowed.
+ anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
+ SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
+ SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
+ SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
+ }
)
// Available returns true if the digest type is available for use. If this
@@ -125,6 +134,14 @@ func (a Algorithm) Hash() hash.Hash {
return algorithms[a].New()
}
+// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
+// the encoded portion of the digest.
+func (a Algorithm) Encode(d []byte) string {
+ // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
+ // add support for back registration, we can modify this accordingly.
+ return fmt.Sprintf("%x", d)
+}
+
// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
digester := a.Digester()
@@ -156,3 +173,20 @@ func (a Algorithm) FromBytes(p []byte) Digest {
func (a Algorithm) FromString(s string) Digest {
return a.FromBytes([]byte(s))
}
+
+// Validate validates the encoded portion string
+func (a Algorithm) Validate(encoded string) error {
+ r, ok := anchoredEncodedRegexps[a]
+ if !ok {
+ return ErrDigestUnsupported
+ }
+ // Digests must always be hex-encoded, ensuring that their hex portion will
+ // always be size*2
+ if a.Size()*2 != len(encoded) {
+ return ErrDigestInvalidLength
+ }
+ if r.MatchString(encoded) {
+ return nil
+ }
+ return ErrDigestInvalidFormat
+}
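+
+// Illustrative behavior (a sketch, not an exhaustive spec): for SHA256,
+// Validate returns nil for a 64-character lower-case hex string, returns
+// ErrDigestInvalidLength when the length is wrong, and returns
+// ErrDigestInvalidFormat for non-[a-f0-9] input such as upper-case hex.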
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
index 69e1d2b54..ad398cba2 100644
--- a/vendor/github.com/opencontainers/go-digest/digest.go
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -45,16 +45,21 @@ func NewDigest(alg Algorithm, h hash.Hash) Digest {
// functions. This is also useful for rebuilding digests from binary
// serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
- return Digest(fmt.Sprintf("%s:%x", alg, p))
+ return NewDigestFromEncoded(alg, alg.Encode(p))
}
-// NewDigestFromHex returns a Digest from alg and a the hex encoded digest.
+// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
func NewDigestFromHex(alg, hex string) Digest {
- return Digest(fmt.Sprintf("%s:%s", alg, hex))
+ return NewDigestFromEncoded(Algorithm(alg), hex)
+}
+
+// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
+func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
+ return Digest(fmt.Sprintf("%s:%s", alg, encoded))
}
// DigestRegexp matches valid digest types.
-var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
@@ -96,26 +101,18 @@ func FromString(s string) Digest {
// error if not.
func (d Digest) Validate() error {
s := string(d)
-
i := strings.Index(s, ":")
-
- // validate i then run through regexp
- if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
+ if i <= 0 || i+1 == len(s) {
return ErrDigestInvalidFormat
}
-
- algorithm := Algorithm(s[:i])
+ algorithm, encoded := Algorithm(s[:i]), s[i+1:]
if !algorithm.Available() {
+ if !DigestRegexpAnchored.MatchString(s) {
+ return ErrDigestInvalidFormat
+ }
return ErrDigestUnsupported
}
-
- // Digests much always be hex-encoded, ensuring that their hex portion will
- // always be size*2
- if algorithm.Size()*2 != len(s[i+1:]) {
- return ErrDigestInvalidLength
- }
-
- return nil
+ return algorithm.Validate(encoded)
}
// Algorithm returns the algorithm portion of the digest. This will panic if
@@ -133,12 +130,17 @@ func (d Digest) Verifier() Verifier {
}
}
-// Hex returns the hex digest portion of the digest. This will panic if the
+// Encoded returns the encoded portion of the digest. This will panic if the
// underlying digest is not in a valid format.
-func (d Digest) Hex() string {
+func (d Digest) Encoded() string {
return string(d[d.sepIndex()+1:])
}
+// Hex is deprecated. Please use Digest.Encoded.
+func (d Digest) Hex() string {
+ return d.Encoded()
+}
+
func (d Digest) String() string {
return string(d)
}
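Again illustrative rather than part of the diff: the reworked `Digest.Validate` now distinguishes a well-formed digest with an unregistered algorithm from a malformed one, and `NewDigestFromEncoded` replaces the deprecated hex-specific constructor.

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	enc := digest.SHA256.FromString("hello").Encoded()

	// Rebuild a digest from an already-encoded string.
	d := digest.NewDigestFromEncoded(digest.SHA256, enc)
	fmt.Println(d.Validate()) // <nil>

	// "blake2b" matches the relaxed DigestRegexp but is not registered,
	// so Validate reports ErrDigestUnsupported, not ErrDigestInvalidFormat.
	fmt.Println(digest.Digest("blake2b:" + enc).Validate())
}
```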
diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md
index 5215e32c1..e755fb7bc 100644
--- a/vendor/github.com/opencontainers/runc/README.md
+++ b/vendor/github.com/opencontainers/runc/README.md
@@ -68,6 +68,7 @@ make BUILDTAGS='seccomp apparmor'
| selinux | selinux process and mount labeling | <none> |
| apparmor | apparmor profile support | <none> |
| ambient | ambient capability support | kernel 4.3 |
+| nokmem | disable kernel memory accounting | <none> |
### Running the test suite
@@ -87,6 +88,18 @@ You can run a specific test case by setting the `TESTFLAGS` variable.
# make test TESTFLAGS="-run=SomeTestFunction"
```
+You can run a specific integration test by setting the `TESTPATH` variable.
+
+```bash
+# make test TESTPATH="/checkpoint.bats"
+```
+
+You can run a test in your proxy environment by setting the `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables.
+
+```bash
+# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/"
+```
+
### Dependencies Management
`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management.
@@ -251,3 +264,7 @@ PIDFile=/run/mycontainerid.pid
[Install]
WantedBy=multi-user.target
```
+
+## License
+
+The code and docs are released under the [Apache 2.0 license](LICENSE).
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md
index 42f3efe56..1d7fa04c0 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/README.md
+++ b/vendor/github.com/opencontainers/runc/libcontainer/README.md
@@ -148,6 +148,7 @@ config := &configs.Config{
{Type: configs.NEWPID},
{Type: configs.NEWUSER},
{Type: configs.NEWNET},
+ {Type: configs.NEWCGROUP},
}),
Cgroups: &configs.Cgroup{
Name: "test-container",
@@ -323,6 +324,7 @@ generated when building libcontainer with docker.
## Copyright and license
-Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
-Docs released under Creative commons.
-
+Code and documentation copyright 2014 Docker, Inc.
+The code and documentation are released under the [Apache 2.0 license](../LICENSE).
+The documentation is also released under the Creative Commons Attribution 4.0 International License.
+You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
index 7c995efee..ea571ad93 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -13,40 +13,50 @@ import (
"strings"
"time"
- "github.com/docker/go-units"
+ units "github.com/docker/go-units"
)
const (
- cgroupNamePrefix = "name="
+ CgroupNamePrefix = "name="
CgroupProcesses = "cgroup.procs"
)
// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
-func FindCgroupMountpoint(subsystem string) (string, error) {
- mnt, _, err := FindCgroupMountpointAndRoot(subsystem)
+func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
+ mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
return mnt, err
}
-func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) {
+func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) {
// We are not using mount.GetMounts() because it's super-inefficient,
// parsing it directly is about 10x faster because it avoids Sscanf.
// It was one of two major performance drawbacks in container start.
if !isSubsystemAvailable(subsystem) {
return "", "", NewNotFoundError(subsystem)
}
+
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", "", err
}
defer f.Close()
- scanner := bufio.NewScanner(f)
+ return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem)
+}
+
+func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) {
+ scanner := bufio.NewScanner(reader)
for scanner.Scan() {
txt := scanner.Text()
- fields := strings.Split(txt, " ")
- for _, opt := range strings.Split(fields[len(fields)-1], ",") {
- if opt == subsystem {
- return fields[4], fields[3], nil
+ fields := strings.Fields(txt)
+ if len(fields) < 5 {
+ continue
+ }
+ if strings.HasPrefix(fields[4], cgroupPath) {
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], fields[3], nil
+ }
}
}
}
@@ -103,7 +113,7 @@ func FindCgroupMountpointDir() (string, error) {
}
if postSeparatorFields[0] == "cgroup" {
- // Check that the mount is properly formated.
+ // Check that the mount is properly formatted.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
@@ -151,19 +161,20 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount,
Root: fields[3],
}
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
- if !ss[opt] {
+ seen, known := ss[opt]
+ if !known || (!all && seen) {
continue
}
- if strings.HasPrefix(opt, cgroupNamePrefix) {
- m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
- } else {
- m.Subsystems = append(m.Subsystems, opt)
- }
- if !all {
- numFound++
+ ss[opt] = true
+ if strings.HasPrefix(opt, CgroupNamePrefix) {
+ opt = opt[len(CgroupNamePrefix):]
}
+ m.Subsystems = append(m.Subsystems, opt)
+ numFound++
+ }
+ if len(m.Subsystems) > 0 || all {
+ res = append(res, m)
}
- res = append(res, m)
}
if err := scanner.Err(); err != nil {
return nil, err
@@ -187,7 +198,7 @@ func GetCgroupMounts(all bool) ([]Mount, error) {
allMap := make(map[string]bool)
for s := range allSubsystems {
- allMap[s] = true
+ allMap[s] = false
}
return getCgroupMountsHelper(allMap, f, all)
}
@@ -256,13 +267,13 @@ func GetInitCgroupPath(subsystem string) (string, error) {
}
func getCgroupPathHelper(subsystem, cgroup string) (string, error) {
- mnt, root, err := FindCgroupMountpointAndRoot(subsystem)
+ mnt, root, err := FindCgroupMountpointAndRoot("", subsystem)
if err != nil {
return "", err
}
// This is needed for nested containers, because in /proc/self/cgroup we
- // see pathes from host, which don't exist in container.
+ // see paths from host, which don't exist in container.
relCgroup, err := filepath.Rel(root, cgroup)
if err != nil {
return "", err
@@ -342,7 +353,7 @@ func getControllerPath(subsystem string, cgroups map[string]string) (string, err
return p, nil
}
- if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok {
+ if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok {
return p, nil
}
@@ -452,7 +463,7 @@ func WriteCgroupProc(dir string, pid int) error {
return fmt.Errorf("no such directory for %s", CgroupProcesses)
}
- // Dont attach any pid to the cgroup if -1 is specified as a pid
+ // Don't attach any pid to the cgroup if -1 is specified as a pid
if pid != -1 {
if err := ioutil.WriteFile(filepath.Join(dir, CgroupProcesses), []byte(strconv.Itoa(pid)), 0700); err != nil {
return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
index b1c4762fe..7728522fe 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -186,12 +186,19 @@ type Config struct {
// callers keyring in this case.
NoNewKeyring bool `json:"no_new_keyring"`
- // Rootless specifies whether the container is a rootless container.
- Rootless bool `json:"rootless"`
-
- // IntelRdt specifies settings for Intel RDT/CAT group that the container is placed into
- // to limit the resources (e.g., L3 cache) the container has available
+ // IntelRdt specifies settings for Intel RDT group that the container is placed into
+ // to limit the resources (e.g., L3 cache, memory bandwidth) the container has available
IntelRdt *IntelRdt `json:"intel_rdt,omitempty"`
+
+ // RootlessEUID is set when runc was launched with a non-zero EUID.
+ // Note that RootlessEUID is set to false when launched with EUID=0 in userns.
+ // When RootlessEUID is set, runc creates a new userns for the container.
+ // (config.json needs to contain userns settings)
+ RootlessEUID bool `json:"rootless_euid,omitempty"`
+
+ // RootlessCgroups is set when the process is unlikely to have full access to cgroups.
+ // When RootlessCgroups is set, cgroups errors are ignored.
+ RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
}
type Hooks struct {
@@ -265,26 +272,23 @@ func (hooks Hooks) MarshalJSON() ([]byte, error) {
})
}
-// HookState is the payload provided to a hook on execution.
-type HookState specs.State
-
type Hook interface {
// Run executes the hook with the provided state.
- Run(HookState) error
+ Run(*specs.State) error
}
// NewFunctionHook will call the provided function when the hook is run.
-func NewFunctionHook(f func(HookState) error) FuncHook {
+func NewFunctionHook(f func(*specs.State) error) FuncHook {
return FuncHook{
run: f,
}
}
type FuncHook struct {
- run func(HookState) error
+ run func(*specs.State) error
}
-func (f FuncHook) Run(s HookState) error {
+func (f FuncHook) Run(s *specs.State) error {
return f.run(s)
}
@@ -307,7 +311,7 @@ type CommandHook struct {
Command
}
-func (c Command) Run(s HookState) error {
+func (c Command) Run(s *specs.State) error {
b, err := json.Marshal(s)
if err != nil {
return err
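A minimal sketch of the new hook signature (illustrative; `specs` here is `github.com/opencontainers/runtime-spec/specs-go`): hooks now receive the runtime-spec state directly instead of the removed `HookState` alias.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	hook := configs.NewFunctionHook(func(s *specs.State) error {
		fmt.Printf("hook ran for container %s (pid %d)\n", s.ID, s.Pid)
		return nil
	})
	// Callers now pass *specs.State; no conversion through HookState is needed.
	_ = hook.Run(&specs.State{ID: "test", Pid: 1234})
}
```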
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
index 36bd5f96a..57e9f037d 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
@@ -4,4 +4,10 @@ type IntelRdt struct {
// The schema for L3 cache id and capacity bitmask (CBM)
// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
L3CacheSchema string `json:"l3_cache_schema,omitempty"`
+
+ // The schema of memory bandwidth per L3 cache id
+ // Format: "MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;..."
+ // The unit of memory bandwidth is specified in "percentages" by
+ // default, and in "MBps" if MBA Software Controller is enabled.
+ MemBwSchema string `json:"memBwSchema,omitempty"`
}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
index 5fc171a57..1bbaef9bd 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
@@ -7,12 +7,13 @@ import (
)
const (
- NEWNET NamespaceType = "NEWNET"
- NEWPID NamespaceType = "NEWPID"
- NEWNS NamespaceType = "NEWNS"
- NEWUTS NamespaceType = "NEWUTS"
- NEWIPC NamespaceType = "NEWIPC"
- NEWUSER NamespaceType = "NEWUSER"
+ NEWNET NamespaceType = "NEWNET"
+ NEWPID NamespaceType = "NEWPID"
+ NEWNS NamespaceType = "NEWNS"
+ NEWUTS NamespaceType = "NEWUTS"
+ NEWIPC NamespaceType = "NEWIPC"
+ NEWUSER NamespaceType = "NEWUSER"
+ NEWCGROUP NamespaceType = "NEWCGROUP"
)
var (
@@ -35,6 +36,8 @@ func NsName(ns NamespaceType) string {
return "user"
case NEWUTS:
return "uts"
+ case NEWCGROUP:
+ return "cgroup"
}
return ""
}
@@ -68,6 +71,7 @@ func NamespaceTypes() []NamespaceType {
NEWNET,
NEWPID,
NEWNS,
+ NEWCGROUP,
}
}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
index 4ce6813d2..2dc7adfc9 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
@@ -9,12 +9,13 @@ func (n *Namespace) Syscall() int {
}
var namespaceInfo = map[NamespaceType]int{
- NEWNET: unix.CLONE_NEWNET,
- NEWNS: unix.CLONE_NEWNS,
- NEWUSER: unix.CLONE_NEWUSER,
- NEWIPC: unix.CLONE_NEWIPC,
- NEWUTS: unix.CLONE_NEWUTS,
- NEWPID: unix.CLONE_NEWPID,
+ NEWNET: unix.CLONE_NEWNET,
+ NEWNS: unix.CLONE_NEWNS,
+ NEWUSER: unix.CLONE_NEWUSER,
+ NEWIPC: unix.CLONE_NEWIPC,
+ NEWUTS: unix.CLONE_NEWUTS,
+ NEWPID: unix.CLONE_NEWPID,
+ NEWCGROUP: unix.CLONE_NEWCGROUP,
}
// CloneFlags parses the container's Namespaces options to set the correct
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
index 575701371..9ec6c3931 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
@@ -10,8 +10,8 @@ The `nsenter` package will `import "C"` and it uses [cgo](https://golang.org/cmd
package. In cgo, if the import of "C" is immediately preceded by a comment, that comment,
called the preamble, is used as a header when compiling the C parts of the package.
So every time we import package `nsenter`, the C code function `nsexec()` would be
-called. And package `nsenter` is now only imported in `main_unix.go`, so every time
-before we call `cmd.Start` on linux, that C code would run.
+called. And package `nsenter` is only imported in `init.go`, so every time the runc
+`init` command is invoked, that C code is run.
Because `nsexec()` must be run before the Go runtime in order to use the
Linux kernel namespace, you must `import` this library into a package if
@@ -37,7 +37,7 @@ the parent `nsexec()` will exit and the child `nsexec()` process will
return to allow the Go runtime take over.
NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
-CLONE_NEW* clone flags because we must fork a new process in order to
+`CLONE_NEW*` clone flags because we must fork a new process in order to
enter the PID namespace.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
index a4cd1399d..28269dfc0 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
@@ -42,6 +42,12 @@ enum sync_t {
SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */
};
+/*
+ * Synchronisation value for cgroup namespace setup.
+ * The same constant is defined in process_linux.go as "createCgroupns".
+ */
+#define CREATECGROUPNS 0x80
+
/* longjmp() arguments. */
#define JUMP_PARENT 0x00
#define JUMP_CHILD 0xA0
@@ -82,7 +88,7 @@ struct nlconfig_t {
uint8_t is_setgroup;
/* Rootless container settings. */
- uint8_t is_rootless;
+ uint8_t is_rootless_euid; /* boolean */
char *uidmappath;
size_t uidmappath_len;
char *gidmappath;
@@ -100,7 +106,7 @@ struct nlconfig_t {
#define GIDMAP_ATTR 27284
#define SETGROUP_ATTR 27285
#define OOM_SCORE_ADJ_ATTR 27286
-#define ROOTLESS_ATTR 27287
+#define ROOTLESS_EUID_ATTR 27287
#define UIDMAPPATH_ATTR 27288
#define GIDMAPPATH_ATTR 27289
@@ -211,7 +217,7 @@ static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len)
/*
* If @app is NULL, execve will segfault. Just check it here and bail (if
- * we're in this path, the caller is already getting desparate and there
+ * we're in this path, the caller is already getting desperate and there
* isn't a backup to this failing). This usually would be a configuration
* or programming issue.
*/
@@ -419,8 +425,8 @@ static void nl_parse(int fd, struct nlconfig_t *config)
case CLONE_FLAGS_ATTR:
config->cloneflags = readint32(current);
break;
- case ROOTLESS_ATTR:
- config->is_rootless = readint8(current);
+ case ROOTLESS_EUID_ATTR:
+ config->is_rootless_euid = readint8(current); /* boolean */
break;
case OOM_SCORE_ADJ_ATTR:
config->oom_score_adj = current;
@@ -640,7 +646,6 @@ void nsexec(void)
case JUMP_PARENT:{
int len;
pid_t child, first_child = -1;
- char buf[JSON_MAX];
bool ready = false;
/* For debugging. */
@@ -687,7 +692,7 @@ void nsexec(void)
* newuidmap/newgidmap shall be used.
*/
- if (config.is_rootless && !config.is_setgroup)
+ if (config.is_rootless_euid && !config.is_setgroup)
update_setgroups(child, SETGROUPS_DENY);
/* Set up mappings. */
@@ -716,6 +721,18 @@ void nsexec(void)
kill(child, SIGKILL);
bail("failed to sync with child: write(SYNC_RECVPID_ACK)");
}
+
+ /*
+ * Send the init_func pid and the pid of the first child back to our parent.
+ * We need to send both back because we can't reap the first child we created (CLONE_PARENT).
+ * It becomes the responsibility of our parent to reap the first child.
+ */
+ len = dprintf(pipenum, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
+ if (len < 0) {
+ kill(child, SIGKILL);
+ bail("unable to generate JSON for child pid");
+ }
}
break;
case SYNC_CHILD_READY:
@@ -759,23 +776,6 @@ void nsexec(void)
bail("unexpected sync value: %u", s);
}
}
-
- /*
- * Send the init_func pid and the pid of the first child back to our parent.
- *
- * We need to send both back because we can't reap the first child we created (CLONE_PARENT).
- * It becomes the responsibility of our parent to reap the first child.
- */
- len = snprintf(buf, JSON_MAX, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
- if (len < 0) {
- kill(child, SIGKILL);
- bail("unable to generate JSON for child pid");
- }
- if (write(pipenum, buf, len) != len) {
- kill(child, SIGKILL);
- bail("unable to send child pid to bootstrapper");
- }
-
exit(0);
}
@@ -862,14 +862,17 @@ void nsexec(void)
if (setresuid(0, 0, 0) < 0)
bail("failed to become root in user namespace");
}
-
/*
- * Unshare all of the namespaces. Note that we don't merge this
- * with clone() because there were some old kernel versions where
- * clone(CLONE_PARENT | CLONE_NEWPID) was broken, so we'll just do
- * it the long way.
+ * Unshare all of the namespaces. Now, it should be noted that this
+ * ordering might break in the future (especially with rootless
+ * containers). But for now, it's not possible to split this into
+ * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
+ *
+ * Note that we don't merge this with clone() because there were
+ * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
+ * was broken, so we'll just do it the long way anyway.
*/
- if (unshare(config.cloneflags) < 0)
+ if (unshare(config.cloneflags & ~CLONE_NEWCGROUP) < 0)
bail("failed to unshare namespaces");
/*
@@ -953,11 +956,23 @@ void nsexec(void)
if (setgid(0) < 0)
bail("setgid failed");
- if (!config.is_rootless && config.is_setgroup) {
+ if (!config.is_rootless_euid && config.is_setgroup) {
if (setgroups(0, NULL) < 0)
bail("setgroups failed");
}
+ /* ... wait until our topmost parent has finished cgroup setup in p.manager.Apply() ... */
+ if (config.cloneflags & CLONE_NEWCGROUP) {
+ uint8_t value;
+ if (read(pipenum, &value, sizeof(value)) != sizeof(value))
+ bail("read synchronisation value failed");
+ if (value == CREATECGROUPNS) {
+ if (unshare(CLONE_NEWCGROUP) < 0)
+ bail("failed to unshare cgroup namespace");
+ } else
+ bail("received unknown synchronisation value");
+ }
+
s = SYNC_CHILD_READY;
if (write(syncfd, &s, sizeof(s)) != sizeof(s))
bail("failed to sync with patent: write(SYNC_CHILD_READY)");
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
index c1e634c94..92b5ae8de 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -5,6 +5,7 @@ package user
import (
"io"
"os"
+ "strconv"
"golang.org/x/sys/unix"
)
@@ -115,22 +116,23 @@ func CurrentGroup() (Group, error) {
return LookupGid(unix.Getgid())
}
-func CurrentUserSubUIDs() ([]SubID, error) {
+func currentUserSubIDs(fileName string) ([]SubID, error) {
u, err := CurrentUser()
if err != nil {
return nil, err
}
- return ParseSubIDFileFilter("/etc/subuid",
- func(entry SubID) bool { return entry.Name == u.Name })
+ filter := func(entry SubID) bool {
+ return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
+ }
+ return ParseSubIDFileFilter(fileName, filter)
}
-func CurrentGroupSubGIDs() ([]SubID, error) {
- g, err := CurrentGroup()
- if err != nil {
- return nil, err
- }
- return ParseSubIDFileFilter("/etc/subgid",
- func(entry SubID) bool { return entry.Name == g.Name })
+func CurrentUserSubUIDs() ([]SubID, error) {
+ return currentUserSubIDs("/etc/subuid")
+}
+
+func CurrentUserSubGIDs() ([]SubID, error) {
+ return currentUserSubIDs("/etc/subgid")
}
func CurrentProcessUIDMap() ([]IDMap, error) {
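The new filter accepts either spelling of the account in /etc/subuid and /etc/subgid. A standalone sketch of that matching rule (the local `subID` type mirrors `user.SubID`):

```go
package main

import (
	"fmt"
	"strconv"
)

// subID mirrors user.SubID: entries may name an account by login or by UID.
type subID struct {
	Name  string
	SubID int64
	Count int64
}

func main() {
	name, uid := "alice", 1000
	entries := []subID{
		{Name: "alice", SubID: 100000, Count: 65536}, // matches by login name
		{Name: "1000", SubID: 300000, Count: 65536},  // matches by numeric UID
		{Name: "bob", SubID: 200000, Count: 65536},   // filtered out
	}
	for _, e := range entries {
		if e.Name == name || e.Name == strconv.Itoa(uid) {
			fmt.Printf("%s: %d-%d\n", e.Name, e.SubID, e.SubID+e.Count-1)
		}
	}
}
```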
diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf
index e2b519e67..fadbe0707 100644
--- a/vendor/github.com/opencontainers/runc/vendor.conf
+++ b/vendor/github.com/opencontainers/runc/vendor.conf
@@ -1,7 +1,7 @@
# OCI runtime-spec. When updating this, make sure you use a version tag rather
# than a commit ID so it's much more obvious what version of the spec we are
# using.
-github.com/opencontainers/runtime-spec v1.0.0
+github.com/opencontainers/runtime-spec 5684b8af48c1ac3b1451fa499724e30e3c20a294
# Core libcontainer functionality.
github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
github.com/opencontainers/selinux v1.0.0-rc1
diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
new file mode 100644
index 000000000..cd0a8ac15
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/atom.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atom provides integer codes (also known as atoms) for a fixed set of
+// frequently occurring HTML strings: tag names and attribute keys such as "p"
+// and "id".
+//
+// Sharing an atom's name between all elements with the same tag can result in
+// fewer string allocations when tokenizing and parsing HTML. Integer
+// comparisons are also generally faster than string comparisons.
+//
+// The value of an atom's particular code is not guaranteed to stay the same
+// between versions of this package. Neither is any ordering guaranteed:
+// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
+// be dense. The only guarantees are that e.g. looking up "div" will yield
+// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
+package atom // import "golang.org/x/net/html/atom"
+
+// Atom is an integer code for a string. The zero value maps to "".
+type Atom uint32
+
+// String returns the atom's name.
+func (a Atom) String() string {
+ start := uint32(a >> 8)
+ n := uint32(a & 0xff)
+ if start+n > uint32(len(atomText)) {
+ return ""
+ }
+ return atomText[start : start+n]
+}
+
+func (a Atom) string() string {
+ return atomText[a>>8 : a>>8+a&0xff]
+}
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s []byte) uint32 {
+ for i := range s {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+func match(s string, t []byte) bool {
+ for i, c := range t {
+ if s[i] != c {
+ return false
+ }
+ }
+ return true
+}
+
+// Lookup returns the atom whose name is s. It returns zero if there is no
+// such atom. The lookup is case sensitive.
+func Lookup(s []byte) Atom {
+ if len(s) == 0 || len(s) > maxAtomLen {
+ return 0
+ }
+ h := fnv(hash0, s)
+ if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
+ return a
+ }
+ return 0
+}
+
+// String returns a string whose contents are equal to s. In that sense, it is
+// equivalent to string(s) but may be more efficient.
+func String(s []byte) string {
+ if a := Lookup(s); a != 0 {
+ return a.String()
+ }
+ return string(s)
+}
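A short usage sketch for the newly vendored package (illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Lookup is case sensitive and limited to the generated table.
	fmt.Println(atom.Lookup([]byte("div")) == atom.Div) // true
	fmt.Println(atom.Div.String())                      // "div"
	fmt.Println(atom.Lookup([]byte("DIV")) == 0)        // true: no such atom
	// String avoids allocating when the bytes name a known atom.
	fmt.Println(atom.String([]byte("span"))) // "span"
}
```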
diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go
new file mode 100644
index 000000000..6bfa86601
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/gen.go
@@ -0,0 +1,648 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This program generates table.go and table_test.go.
+// Invoke as
+//
+// go run gen.go |gofmt >table.go
+// go run gen.go -test |gofmt >table_test.go
+
+import (
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "sort"
+ "strings"
+)
+
+// identifier converts s to a Go exported identifier.
+// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
+func identifier(s string) string {
+ b := make([]byte, 0, len(s))
+ cap := true
+ for _, c := range s {
+ if c == '-' {
+ cap = true
+ continue
+ }
+ if cap && 'a' <= c && c <= 'z' {
+ c -= 'a' - 'A'
+ }
+ cap = false
+ b = append(b, byte(c))
+ }
+ return string(b)
+}
+
+var test = flag.Bool("test", false, "generate table_test.go")
+
+func main() {
+ flag.Parse()
+
+ var all []string
+ all = append(all, elements...)
+ all = append(all, attributes...)
+ all = append(all, eventHandlers...)
+ all = append(all, extra...)
+ sort.Strings(all)
+
+ if *test {
+ fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
+ fmt.Printf("package atom\n\n")
+ fmt.Printf("var testAtomList = []string{\n")
+ for _, s := range all {
+ fmt.Printf("\t%q,\n", s)
+ }
+ fmt.Printf("}\n")
+ return
+ }
+
+ // uniq - lists have dups
+ // compute max len too
+ maxLen := 0
+ w := 0
+ for _, s := range all {
+ if w == 0 || all[w-1] != s {
+ if maxLen < len(s) {
+ maxLen = len(s)
+ }
+ all[w] = s
+ w++
+ }
+ }
+ all = all[:w]
+
+ // Find hash that minimizes table size.
+ var best *table
+ for i := 0; i < 1000000; i++ {
+ if best != nil && 1<<(best.k-1) < len(all) {
+ break
+ }
+ h := rand.Uint32()
+ for k := uint(0); k <= 16; k++ {
+ if best != nil && k >= best.k {
+ break
+ }
+ var t table
+ if t.init(h, k, all) {
+ best = &t
+ break
+ }
+ }
+ }
+ if best == nil {
+ fmt.Fprintf(os.Stderr, "failed to construct string table\n")
+ os.Exit(1)
+ }
+
+ // Lay out strings, using overlaps when possible.
+ layout := append([]string{}, all...)
+
+ // Remove strings that are substrings of other strings
+ for changed := true; changed; {
+ changed = false
+ for i, s := range layout {
+ if s == "" {
+ continue
+ }
+ for j, t := range layout {
+ if i != j && t != "" && strings.Contains(s, t) {
+ changed = true
+ layout[j] = ""
+ }
+ }
+ }
+ }
+
+ // Join strings where one suffix matches another prefix.
+ for {
+ // Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
+ // maximizing overlap length k.
+ besti := -1
+ bestj := -1
+ bestk := 0
+ for i, s := range layout {
+ if s == "" {
+ continue
+ }
+ for j, t := range layout {
+ if i == j {
+ continue
+ }
+ for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
+ if s[len(s)-k:] == t[:k] {
+ besti = i
+ bestj = j
+ bestk = k
+ }
+ }
+ }
+ }
+ if bestk > 0 {
+ layout[besti] += layout[bestj][bestk:]
+ layout[bestj] = ""
+ continue
+ }
+ break
+ }
+
+ text := strings.Join(layout, "")
+
+ atom := map[string]uint32{}
+ for _, s := range all {
+ off := strings.Index(text, s)
+ if off < 0 {
+ panic("lost string " + s)
+ }
+ atom[s] = uint32(off<<8 | len(s))
+ }
+
+ // Generate the Go code.
+ fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
+ fmt.Printf("package atom\n\nconst (\n")
+ for _, s := range all {
+ fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
+ }
+ fmt.Printf(")\n\n")
+
+ fmt.Printf("const hash0 = %#x\n\n", best.h0)
+ fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
+
+ fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
+ for i, s := range best.tab {
+ if s == "" {
+ continue
+ }
+ fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
+ }
+ fmt.Printf("}\n")
+ datasize := (1 << best.k) * 4
+
+ fmt.Printf("const atomText =\n")
+ textsize := len(text)
+ for len(text) > 60 {
+ fmt.Printf("\t%q +\n", text[:60])
+ text = text[60:]
+ }
+ fmt.Printf("\t%q\n\n", text)
+
+ fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
+}
+
+type byLen []string
+
+func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
+func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byLen) Len() int { return len(x) }
+
+// fnv computes the FNV hash with an arbitrary starting value h.
+func fnv(h uint32, s string) uint32 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// A table represents an attempt at constructing the lookup table.
+// The lookup table uses cuckoo hashing, meaning that each string
+// can be found in one of two positions.
+type table struct {
+ h0 uint32
+ k uint
+ mask uint32
+ tab []string
+}
+
+// hash returns the two hashes for s.
+func (t *table) hash(s string) (h1, h2 uint32) {
+ h := fnv(t.h0, s)
+ h1 = h & t.mask
+ h2 = (h >> 16) & t.mask
+ return
+}
+
+// init initializes the table with the given parameters.
+// h0 is the initial hash value,
+// k is the number of bits of hash value to use, and
+// x is the list of strings to store in the table.
+// init returns false if the table cannot be constructed.
+func (t *table) init(h0 uint32, k uint, x []string) bool {
+ t.h0 = h0
+ t.k = k
+ t.tab = make([]string, 1<<k)
+ t.mask = 1<<k - 1
+ for _, s := range x {
+ if !t.insert(s) {
+ return false
+ }
+ }
+ return true
+}
+
+// insert inserts s in the table.
+func (t *table) insert(s string) bool {
+ h1, h2 := t.hash(s)
+ if t.tab[h1] == "" {
+ t.tab[h1] = s
+ return true
+ }
+ if t.tab[h2] == "" {
+ t.tab[h2] = s
+ return true
+ }
+ if t.push(h1, 0) {
+ t.tab[h1] = s
+ return true
+ }
+ if t.push(h2, 0) {
+ t.tab[h2] = s
+ return true
+ }
+ return false
+}
+
+// push attempts to push aside the entry in slot i.
+func (t *table) push(i uint32, depth int) bool {
+ if depth > len(t.tab) {
+ return false
+ }
+ s := t.tab[i]
+ h1, h2 := t.hash(s)
+ j := h1 + h2 - i
+ if t.tab[j] != "" && !t.push(j, depth+1) {
+ return false
+ }
+ t.tab[j] = s
+ return true
+}
+
+// The lists of element names and attribute keys were taken from
+// https://html.spec.whatwg.org/multipage/indices.html#index
+// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
+
+var elements = []string{
+ "a",
+ "abbr",
+ "address",
+ "area",
+ "article",
+ "aside",
+ "audio",
+ "b",
+ "base",
+ "bdi",
+ "bdo",
+ "blockquote",
+ "body",
+ "br",
+ "button",
+ "canvas",
+ "caption",
+ "cite",
+ "code",
+ "col",
+ "colgroup",
+ "command",
+ "data",
+ "datalist",
+ "dd",
+ "del",
+ "details",
+ "dfn",
+ "dialog",
+ "div",
+ "dl",
+ "dt",
+ "em",
+ "embed",
+ "fieldset",
+ "figcaption",
+ "figure",
+ "footer",
+ "form",
+ "h1",
+ "h2",
+ "h3",
+ "h4",
+ "h5",
+ "h6",
+ "head",
+ "header",
+ "hgroup",
+ "hr",
+ "html",
+ "i",
+ "iframe",
+ "img",
+ "input",
+ "ins",
+ "kbd",
+ "keygen",
+ "label",
+ "legend",
+ "li",
+ "link",
+ "map",
+ "mark",
+ "menu",
+ "menuitem",
+ "meta",
+ "meter",
+ "nav",
+ "noscript",
+ "object",
+ "ol",
+ "optgroup",
+ "option",
+ "output",
+ "p",
+ "param",
+ "pre",
+ "progress",
+ "q",
+ "rp",
+ "rt",
+ "ruby",
+ "s",
+ "samp",
+ "script",
+ "section",
+ "select",
+ "small",
+ "source",
+ "span",
+ "strong",
+ "style",
+ "sub",
+ "summary",
+ "sup",
+ "table",
+ "tbody",
+ "td",
+ "template",
+ "textarea",
+ "tfoot",
+ "th",
+ "thead",
+ "time",
+ "title",
+ "tr",
+ "track",
+ "u",
+ "ul",
+ "var",
+ "video",
+ "wbr",
+}
+
+// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
+
+var attributes = []string{
+ "abbr",
+ "accept",
+ "accept-charset",
+ "accesskey",
+ "action",
+ "alt",
+ "async",
+ "autocomplete",
+ "autofocus",
+ "autoplay",
+ "challenge",
+ "charset",
+ "checked",
+ "cite",
+ "class",
+ "cols",
+ "colspan",
+ "command",
+ "content",
+ "contenteditable",
+ "contextmenu",
+ "controls",
+ "coords",
+ "crossorigin",
+ "data",
+ "datetime",
+ "default",
+ "defer",
+ "dir",
+ "dirname",
+ "disabled",
+ "download",
+ "draggable",
+ "dropzone",
+ "enctype",
+ "for",
+ "form",
+ "formaction",
+ "formenctype",
+ "formmethod",
+ "formnovalidate",
+ "formtarget",
+ "headers",
+ "height",
+ "hidden",
+ "high",
+ "href",
+ "hreflang",
+ "http-equiv",
+ "icon",
+ "id",
+ "inputmode",
+ "ismap",
+ "itemid",
+ "itemprop",
+ "itemref",
+ "itemscope",
+ "itemtype",
+ "keytype",
+ "kind",
+ "label",
+ "lang",
+ "list",
+ "loop",
+ "low",
+ "manifest",
+ "max",
+ "maxlength",
+ "media",
+ "mediagroup",
+ "method",
+ "min",
+ "minlength",
+ "multiple",
+ "muted",
+ "name",
+ "novalidate",
+ "open",
+ "optimum",
+ "pattern",
+ "ping",
+ "placeholder",
+ "poster",
+ "preload",
+ "radiogroup",
+ "readonly",
+ "rel",
+ "required",
+ "reversed",
+ "rows",
+ "rowspan",
+ "sandbox",
+ "spellcheck",
+ "scope",
+ "scoped",
+ "seamless",
+ "selected",
+ "shape",
+ "size",
+ "sizes",
+ "sortable",
+ "sorted",
+ "span",
+ "src",
+ "srcdoc",
+ "srclang",
+ "start",
+ "step",
+ "style",
+ "tabindex",
+ "target",
+ "title",
+ "translate",
+ "type",
+ "typemustmatch",
+ "usemap",
+ "value",
+ "width",
+ "wrap",
+}
+
+var eventHandlers = []string{
+ "onabort",
+ "onautocomplete",
+ "onautocompleteerror",
+ "onafterprint",
+ "onbeforeprint",
+ "onbeforeunload",
+ "onblur",
+ "oncancel",
+ "oncanplay",
+ "oncanplaythrough",
+ "onchange",
+ "onclick",
+ "onclose",
+ "oncontextmenu",
+ "oncuechange",
+ "ondblclick",
+ "ondrag",
+ "ondragend",
+ "ondragenter",
+ "ondragleave",
+ "ondragover",
+ "ondragstart",
+ "ondrop",
+ "ondurationchange",
+ "onemptied",
+ "onended",
+ "onerror",
+ "onfocus",
+ "onhashchange",
+ "oninput",
+ "oninvalid",
+ "onkeydown",
+ "onkeypress",
+ "onkeyup",
+ "onlanguagechange",
+ "onload",
+ "onloadeddata",
+ "onloadedmetadata",
+ "onloadstart",
+ "onmessage",
+ "onmousedown",
+ "onmousemove",
+ "onmouseout",
+ "onmouseover",
+ "onmouseup",
+ "onmousewheel",
+ "onoffline",
+ "ononline",
+ "onpagehide",
+ "onpageshow",
+ "onpause",
+ "onplay",
+ "onplaying",
+ "onpopstate",
+ "onprogress",
+ "onratechange",
+ "onreset",
+ "onresize",
+ "onscroll",
+ "onseeked",
+ "onseeking",
+ "onselect",
+ "onshow",
+ "onsort",
+ "onstalled",
+ "onstorage",
+ "onsubmit",
+ "onsuspend",
+ "ontimeupdate",
+ "ontoggle",
+ "onunload",
+ "onvolumechange",
+ "onwaiting",
+}
+
+// extra are ad-hoc values not covered by any of the lists above.
+var extra = []string{
+ "align",
+ "annotation",
+ "annotation-xml",
+ "applet",
+ "basefont",
+ "bgsound",
+ "big",
+ "blink",
+ "center",
+ "color",
+ "desc",
+ "face",
+ "font",
+ "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
+ "foreignobject",
+ "frame",
+ "frameset",
+ "image",
+ "isindex",
+ "listing",
+ "malignmark",
+ "marquee",
+ "math",
+ "mglyph",
+ "mi",
+ "mn",
+ "mo",
+ "ms",
+ "mtext",
+ "nobr",
+ "noembed",
+ "noframes",
+ "plaintext",
+ "prompt",
+ "public",
+ "spacer",
+ "strike",
+ "svg",
+ "system",
+ "tt",
+ "xmp",
+}
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
new file mode 100644
index 000000000..2605ba310
--- /dev/null
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -0,0 +1,713 @@
+// generated by go run gen.go; DO NOT EDIT
+
+package atom
+
+const (
+ A Atom = 0x1
+ Abbr Atom = 0x4
+ Accept Atom = 0x2106
+ AcceptCharset Atom = 0x210e
+ Accesskey Atom = 0x3309
+ Action Atom = 0x1f606
+ Address Atom = 0x4f307
+ Align Atom = 0x1105
+ Alt Atom = 0x4503
+ Annotation Atom = 0x1670a
+ AnnotationXml Atom = 0x1670e
+ Applet Atom = 0x2b306
+ Area Atom = 0x2fa04
+ Article Atom = 0x38807
+ Aside Atom = 0x8305
+ Async Atom = 0x7b05
+ Audio Atom = 0xa605
+ Autocomplete Atom = 0x1fc0c
+ Autofocus Atom = 0xb309
+ Autoplay Atom = 0xce08
+ B Atom = 0x101
+ Base Atom = 0xd604
+ Basefont Atom = 0xd608
+ Bdi Atom = 0x1a03
+ Bdo Atom = 0xe703
+ Bgsound Atom = 0x11807
+ Big Atom = 0x12403
+ Blink Atom = 0x12705
+ Blockquote Atom = 0x12c0a
+ Body Atom = 0x2f04
+ Br Atom = 0x202
+ Button Atom = 0x13606
+ Canvas Atom = 0x7f06
+ Caption Atom = 0x1bb07
+ Center Atom = 0x5b506
+ Challenge Atom = 0x21f09
+ Charset Atom = 0x2807
+ Checked Atom = 0x32807
+ Cite Atom = 0x3c804
+ Class Atom = 0x4de05
+ Code Atom = 0x14904
+ Col Atom = 0x15003
+ Colgroup Atom = 0x15008
+ Color Atom = 0x15d05
+ Cols Atom = 0x16204
+ Colspan Atom = 0x16207
+ Command Atom = 0x17507
+ Content Atom = 0x42307
+ Contenteditable Atom = 0x4230f
+ Contextmenu Atom = 0x3310b
+ Controls Atom = 0x18808
+ Coords Atom = 0x19406
+ Crossorigin Atom = 0x19f0b
+ Data Atom = 0x44a04
+ Datalist Atom = 0x44a08
+ Datetime Atom = 0x23c08
+ Dd Atom = 0x26702
+ Default Atom = 0x8607
+ Defer Atom = 0x14b05
+ Del Atom = 0x3ef03
+ Desc Atom = 0x4db04
+ Details Atom = 0x4807
+ Dfn Atom = 0x6103
+ Dialog Atom = 0x1b06
+ Dir Atom = 0x6903
+ Dirname Atom = 0x6907
+ Disabled Atom = 0x10c08
+ Div Atom = 0x11303
+ Dl Atom = 0x11e02
+ Download Atom = 0x40008
+ Draggable Atom = 0x17b09
+ Dropzone Atom = 0x39108
+ Dt Atom = 0x50902
+ Em Atom = 0x6502
+ Embed Atom = 0x6505
+ Enctype Atom = 0x21107
+ Face Atom = 0x5b304
+ Fieldset Atom = 0x1b008
+ Figcaption Atom = 0x1b80a
+ Figure Atom = 0x1cc06
+ Font Atom = 0xda04
+ Footer Atom = 0x8d06
+ For Atom = 0x1d803
+ ForeignObject Atom = 0x1d80d
+ Foreignobject Atom = 0x1e50d
+ Form Atom = 0x1f204
+ Formaction Atom = 0x1f20a
+ Formenctype Atom = 0x20d0b
+ Formmethod Atom = 0x2280a
+ Formnovalidate Atom = 0x2320e
+ Formtarget Atom = 0x2470a
+ Frame Atom = 0x9a05
+ Frameset Atom = 0x9a08
+ H1 Atom = 0x26e02
+ H2 Atom = 0x29402
+ H3 Atom = 0x2a702
+ H4 Atom = 0x2e902
+ H5 Atom = 0x2f302
+ H6 Atom = 0x50b02
+ Head Atom = 0x2d504
+ Header Atom = 0x2d506
+ Headers Atom = 0x2d507
+ Height Atom = 0x25106
+ Hgroup Atom = 0x25906
+ Hidden Atom = 0x26506
+ High Atom = 0x26b04
+ Hr Atom = 0x27002
+ Href Atom = 0x27004
+ Hreflang Atom = 0x27008
+ Html Atom = 0x25504
+ HttpEquiv Atom = 0x2780a
+ I Atom = 0x601
+ Icon Atom = 0x42204
+ Id Atom = 0x8502
+ Iframe Atom = 0x29606
+ Image Atom = 0x29c05
+ Img Atom = 0x2a103
+ Input Atom = 0x3e805
+ Inputmode Atom = 0x3e809
+ Ins Atom = 0x1a803
+ Isindex Atom = 0x2a907
+ Ismap Atom = 0x2b005
+ Itemid Atom = 0x33c06
+ Itemprop Atom = 0x3c908
+ Itemref Atom = 0x5ad07
+ Itemscope Atom = 0x2b909
+ Itemtype Atom = 0x2c308
+ Kbd Atom = 0x1903
+ Keygen Atom = 0x3906
+ Keytype Atom = 0x53707
+ Kind Atom = 0x10904
+ Label Atom = 0xf005
+ Lang Atom = 0x27404
+ Legend Atom = 0x18206
+ Li Atom = 0x1202
+ Link Atom = 0x12804
+ List Atom = 0x44e04
+ Listing Atom = 0x44e07
+ Loop Atom = 0xf404
+ Low Atom = 0x11f03
+ Malignmark Atom = 0x100a
+ Manifest Atom = 0x5f108
+ Map Atom = 0x2b203
+ Mark Atom = 0x1604
+ Marquee Atom = 0x2cb07
+ Math Atom = 0x2d204
+ Max Atom = 0x2e103
+ Maxlength Atom = 0x2e109
+ Media Atom = 0x6e05
+ Mediagroup Atom = 0x6e0a
+ Menu Atom = 0x33804
+ Menuitem Atom = 0x33808
+ Meta Atom = 0x45d04
+ Meter Atom = 0x24205
+ Method Atom = 0x22c06
+ Mglyph Atom = 0x2a206
+ Mi Atom = 0x2eb02
+ Min Atom = 0x2eb03
+ Minlength Atom = 0x2eb09
+ Mn Atom = 0x23502
+ Mo Atom = 0x3ed02
+ Ms Atom = 0x2bc02
+ Mtext Atom = 0x2f505
+ Multiple Atom = 0x30308
+ Muted Atom = 0x30b05
+ Name Atom = 0x6c04
+ Nav Atom = 0x3e03
+ Nobr Atom = 0x5704
+ Noembed Atom = 0x6307
+ Noframes Atom = 0x9808
+ Noscript Atom = 0x3d208
+ Novalidate Atom = 0x2360a
+ Object Atom = 0x1ec06
+ Ol Atom = 0xc902
+ Onabort Atom = 0x13a07
+ Onafterprint Atom = 0x1c00c
+ Onautocomplete Atom = 0x1fa0e
+ Onautocompleteerror Atom = 0x1fa13
+ Onbeforeprint Atom = 0x6040d
+ Onbeforeunload Atom = 0x4e70e
+ Onblur Atom = 0xaa06
+ Oncancel Atom = 0xe908
+ Oncanplay Atom = 0x28509
+ Oncanplaythrough Atom = 0x28510
+ Onchange Atom = 0x3a708
+ Onclick Atom = 0x31007
+ Onclose Atom = 0x31707
+ Oncontextmenu Atom = 0x32f0d
+ Oncuechange Atom = 0x3420b
+ Ondblclick Atom = 0x34d0a
+ Ondrag Atom = 0x35706
+ Ondragend Atom = 0x35709
+ Ondragenter Atom = 0x3600b
+ Ondragleave Atom = 0x36b0b
+ Ondragover Atom = 0x3760a
+ Ondragstart Atom = 0x3800b
+ Ondrop Atom = 0x38f06
+ Ondurationchange Atom = 0x39f10
+ Onemptied Atom = 0x39609
+ Onended Atom = 0x3af07
+ Onerror Atom = 0x3b607
+ Onfocus Atom = 0x3bd07
+ Onhashchange Atom = 0x3da0c
+ Oninput Atom = 0x3e607
+ Oninvalid Atom = 0x3f209
+ Onkeydown Atom = 0x3fb09
+ Onkeypress Atom = 0x4080a
+ Onkeyup Atom = 0x41807
+ Onlanguagechange Atom = 0x43210
+ Onload Atom = 0x44206
+ Onloadeddata Atom = 0x4420c
+ Onloadedmetadata Atom = 0x45510
+ Onloadstart Atom = 0x46b0b
+ Onmessage Atom = 0x47609
+ Onmousedown Atom = 0x47f0b
+ Onmousemove Atom = 0x48a0b
+ Onmouseout Atom = 0x4950a
+ Onmouseover Atom = 0x4a20b
+ Onmouseup Atom = 0x4ad09
+ Onmousewheel Atom = 0x4b60c
+ Onoffline Atom = 0x4c209
+ Ononline Atom = 0x4cb08
+ Onpagehide Atom = 0x4d30a
+ Onpageshow Atom = 0x4fe0a
+ Onpause Atom = 0x50d07
+ Onplay Atom = 0x51706
+ Onplaying Atom = 0x51709
+ Onpopstate Atom = 0x5200a
+ Onprogress Atom = 0x52a0a
+ Onratechange Atom = 0x53e0c
+ Onreset Atom = 0x54a07
+ Onresize Atom = 0x55108
+ Onscroll Atom = 0x55f08
+ Onseeked Atom = 0x56708
+ Onseeking Atom = 0x56f09
+ Onselect Atom = 0x57808
+ Onshow Atom = 0x58206
+ Onsort Atom = 0x58b06
+ Onstalled Atom = 0x59509
+ Onstorage Atom = 0x59e09
+ Onsubmit Atom = 0x5a708
+ Onsuspend Atom = 0x5bb09
+ Ontimeupdate Atom = 0xdb0c
+ Ontoggle Atom = 0x5c408
+ Onunload Atom = 0x5cc08
+ Onvolumechange Atom = 0x5d40e
+ Onwaiting Atom = 0x5e209
+ Open Atom = 0x3cf04
+ Optgroup Atom = 0xf608
+ Optimum Atom = 0x5eb07
+ Option Atom = 0x60006
+ Output Atom = 0x49c06
+ P Atom = 0xc01
+ Param Atom = 0xc05
+ Pattern Atom = 0x5107
+ Ping Atom = 0x7704
+ Placeholder Atom = 0xc30b
+ Plaintext Atom = 0xfd09
+ Poster Atom = 0x15706
+ Pre Atom = 0x25e03
+ Preload Atom = 0x25e07
+ Progress Atom = 0x52c08
+ Prompt Atom = 0x5fa06
+ Public Atom = 0x41e06
+ Q Atom = 0x13101
+ Radiogroup Atom = 0x30a
+ Readonly Atom = 0x2fb08
+ Rel Atom = 0x25f03
+ Required Atom = 0x1d008
+ Reversed Atom = 0x5a08
+ Rows Atom = 0x9204
+ Rowspan Atom = 0x9207
+ Rp Atom = 0x1c602
+ Rt Atom = 0x13f02
+ Ruby Atom = 0xaf04
+ S Atom = 0x2c01
+ Samp Atom = 0x4e04
+ Sandbox Atom = 0xbb07
+ Scope Atom = 0x2bd05
+ Scoped Atom = 0x2bd06
+ Script Atom = 0x3d406
+ Seamless Atom = 0x31c08
+ Section Atom = 0x4e207
+ Select Atom = 0x57a06
+ Selected Atom = 0x57a08
+ Shape Atom = 0x4f905
+ Size Atom = 0x55504
+ Sizes Atom = 0x55505
+ Small Atom = 0x18f05
+ Sortable Atom = 0x58d08
+ Sorted Atom = 0x19906
+ Source Atom = 0x1aa06
+ Spacer Atom = 0x2db06
+ Span Atom = 0x9504
+ Spellcheck Atom = 0x3230a
+ Src Atom = 0x3c303
+ Srcdoc Atom = 0x3c306
+ Srclang Atom = 0x41107
+ Start Atom = 0x38605
+ Step Atom = 0x5f704
+ Strike Atom = 0x53306
+ Strong Atom = 0x55906
+ Style Atom = 0x61105
+ Sub Atom = 0x5a903
+ Summary Atom = 0x61607
+ Sup Atom = 0x61d03
+ Svg Atom = 0x62003
+ System Atom = 0x62306
+ Tabindex Atom = 0x46308
+ Table Atom = 0x42d05
+ Target Atom = 0x24b06
+ Tbody Atom = 0x2e05
+ Td Atom = 0x4702
+ Template Atom = 0x62608
+ Textarea Atom = 0x2f608
+ Tfoot Atom = 0x8c05
+ Th Atom = 0x22e02
+ Thead Atom = 0x2d405
+ Time Atom = 0xdd04
+ Title Atom = 0xa105
+ Tr Atom = 0x10502
+ Track Atom = 0x10505
+ Translate Atom = 0x14009
+ Tt Atom = 0x5302
+ Type Atom = 0x21404
+ Typemustmatch Atom = 0x2140d
+ U Atom = 0xb01
+ Ul Atom = 0x8a02
+ Usemap Atom = 0x51106
+ Value Atom = 0x4005
+ Var Atom = 0x11503
+ Video Atom = 0x28105
+ Wbr Atom = 0x12103
+ Width Atom = 0x50705
+ Wrap Atom = 0x58704
+ Xmp Atom = 0xc103
+)
+
+const hash0 = 0xc17da63e
+
+const maxAtomLen = 19
+
+var table = [1 << 9]Atom{
+ 0x1: 0x48a0b, // onmousemove
+ 0x2: 0x5e209, // onwaiting
+ 0x3: 0x1fa13, // onautocompleteerror
+ 0x4: 0x5fa06, // prompt
+ 0x7: 0x5eb07, // optimum
+ 0x8: 0x1604, // mark
+ 0xa: 0x5ad07, // itemref
+ 0xb: 0x4fe0a, // onpageshow
+ 0xc: 0x57a06, // select
+ 0xd: 0x17b09, // draggable
+ 0xe: 0x3e03, // nav
+ 0xf: 0x17507, // command
+ 0x11: 0xb01, // u
+ 0x14: 0x2d507, // headers
+ 0x15: 0x44a08, // datalist
+ 0x17: 0x4e04, // samp
+ 0x1a: 0x3fb09, // onkeydown
+ 0x1b: 0x55f08, // onscroll
+ 0x1c: 0x15003, // col
+ 0x20: 0x3c908, // itemprop
+ 0x21: 0x2780a, // http-equiv
+ 0x22: 0x61d03, // sup
+ 0x24: 0x1d008, // required
+ 0x2b: 0x25e07, // preload
+ 0x2c: 0x6040d, // onbeforeprint
+ 0x2d: 0x3600b, // ondragenter
+ 0x2e: 0x50902, // dt
+ 0x2f: 0x5a708, // onsubmit
+ 0x30: 0x27002, // hr
+ 0x31: 0x32f0d, // oncontextmenu
+ 0x33: 0x29c05, // image
+ 0x34: 0x50d07, // onpause
+ 0x35: 0x25906, // hgroup
+ 0x36: 0x7704, // ping
+ 0x37: 0x57808, // onselect
+ 0x3a: 0x11303, // div
+ 0x3b: 0x1fa0e, // onautocomplete
+ 0x40: 0x2eb02, // mi
+ 0x41: 0x31c08, // seamless
+ 0x42: 0x2807, // charset
+ 0x43: 0x8502, // id
+ 0x44: 0x5200a, // onpopstate
+ 0x45: 0x3ef03, // del
+ 0x46: 0x2cb07, // marquee
+ 0x47: 0x3309, // accesskey
+ 0x49: 0x8d06, // footer
+ 0x4a: 0x44e04, // list
+ 0x4b: 0x2b005, // ismap
+ 0x51: 0x33804, // menu
+ 0x52: 0x2f04, // body
+ 0x55: 0x9a08, // frameset
+ 0x56: 0x54a07, // onreset
+ 0x57: 0x12705, // blink
+ 0x58: 0xa105, // title
+ 0x59: 0x38807, // article
+ 0x5b: 0x22e02, // th
+ 0x5d: 0x13101, // q
+ 0x5e: 0x3cf04, // open
+ 0x5f: 0x2fa04, // area
+ 0x61: 0x44206, // onload
+ 0x62: 0xda04, // font
+ 0x63: 0xd604, // base
+ 0x64: 0x16207, // colspan
+ 0x65: 0x53707, // keytype
+ 0x66: 0x11e02, // dl
+ 0x68: 0x1b008, // fieldset
+ 0x6a: 0x2eb03, // min
+ 0x6b: 0x11503, // var
+ 0x6f: 0x2d506, // header
+ 0x70: 0x13f02, // rt
+ 0x71: 0x15008, // colgroup
+ 0x72: 0x23502, // mn
+ 0x74: 0x13a07, // onabort
+ 0x75: 0x3906, // keygen
+ 0x76: 0x4c209, // onoffline
+ 0x77: 0x21f09, // challenge
+ 0x78: 0x2b203, // map
+ 0x7a: 0x2e902, // h4
+ 0x7b: 0x3b607, // onerror
+ 0x7c: 0x2e109, // maxlength
+ 0x7d: 0x2f505, // mtext
+ 0x7e: 0xbb07, // sandbox
+ 0x7f: 0x58b06, // onsort
+ 0x80: 0x100a, // malignmark
+ 0x81: 0x45d04, // meta
+ 0x82: 0x7b05, // async
+ 0x83: 0x2a702, // h3
+ 0x84: 0x26702, // dd
+ 0x85: 0x27004, // href
+ 0x86: 0x6e0a, // mediagroup
+ 0x87: 0x19406, // coords
+ 0x88: 0x41107, // srclang
+ 0x89: 0x34d0a, // ondblclick
+ 0x8a: 0x4005, // value
+ 0x8c: 0xe908, // oncancel
+ 0x8e: 0x3230a, // spellcheck
+ 0x8f: 0x9a05, // frame
+ 0x91: 0x12403, // big
+ 0x94: 0x1f606, // action
+ 0x95: 0x6903, // dir
+ 0x97: 0x2fb08, // readonly
+ 0x99: 0x42d05, // table
+ 0x9a: 0x61607, // summary
+ 0x9b: 0x12103, // wbr
+ 0x9c: 0x30a, // radiogroup
+ 0x9d: 0x6c04, // name
+ 0x9f: 0x62306, // system
+ 0xa1: 0x15d05, // color
+ 0xa2: 0x7f06, // canvas
+ 0xa3: 0x25504, // html
+ 0xa5: 0x56f09, // onseeking
+ 0xac: 0x4f905, // shape
+ 0xad: 0x25f03, // rel
+ 0xae: 0x28510, // oncanplaythrough
+ 0xaf: 0x3760a, // ondragover
+ 0xb0: 0x62608, // template
+ 0xb1: 0x1d80d, // foreignObject
+ 0xb3: 0x9204, // rows
+ 0xb6: 0x44e07, // listing
+ 0xb7: 0x49c06, // output
+ 0xb9: 0x3310b, // contextmenu
+ 0xbb: 0x11f03, // low
+ 0xbc: 0x1c602, // rp
+ 0xbd: 0x5bb09, // onsuspend
+ 0xbe: 0x13606, // button
+ 0xbf: 0x4db04, // desc
+ 0xc1: 0x4e207, // section
+ 0xc2: 0x52a0a, // onprogress
+ 0xc3: 0x59e09, // onstorage
+ 0xc4: 0x2d204, // math
+ 0xc5: 0x4503, // alt
+ 0xc7: 0x8a02, // ul
+ 0xc8: 0x5107, // pattern
+ 0xc9: 0x4b60c, // onmousewheel
+ 0xca: 0x35709, // ondragend
+ 0xcb: 0xaf04, // ruby
+ 0xcc: 0xc01, // p
+ 0xcd: 0x31707, // onclose
+ 0xce: 0x24205, // meter
+ 0xcf: 0x11807, // bgsound
+ 0xd2: 0x25106, // height
+ 0xd4: 0x101, // b
+ 0xd5: 0x2c308, // itemtype
+ 0xd8: 0x1bb07, // caption
+ 0xd9: 0x10c08, // disabled
+ 0xdb: 0x33808, // menuitem
+ 0xdc: 0x62003, // svg
+ 0xdd: 0x18f05, // small
+ 0xde: 0x44a04, // data
+ 0xe0: 0x4cb08, // ononline
+ 0xe1: 0x2a206, // mglyph
+ 0xe3: 0x6505, // embed
+ 0xe4: 0x10502, // tr
+ 0xe5: 0x46b0b, // onloadstart
+ 0xe7: 0x3c306, // srcdoc
+ 0xeb: 0x5c408, // ontoggle
+ 0xed: 0xe703, // bdo
+ 0xee: 0x4702, // td
+ 0xef: 0x8305, // aside
+ 0xf0: 0x29402, // h2
+ 0xf1: 0x52c08, // progress
+ 0xf2: 0x12c0a, // blockquote
+ 0xf4: 0xf005, // label
+ 0xf5: 0x601, // i
+ 0xf7: 0x9207, // rowspan
+ 0xfb: 0x51709, // onplaying
+ 0xfd: 0x2a103, // img
+ 0xfe: 0xf608, // optgroup
+ 0xff: 0x42307, // content
+ 0x101: 0x53e0c, // onratechange
+ 0x103: 0x3da0c, // onhashchange
+ 0x104: 0x4807, // details
+ 0x106: 0x40008, // download
+ 0x109: 0x14009, // translate
+ 0x10b: 0x4230f, // contenteditable
+ 0x10d: 0x36b0b, // ondragleave
+ 0x10e: 0x2106, // accept
+ 0x10f: 0x57a08, // selected
+ 0x112: 0x1f20a, // formaction
+ 0x113: 0x5b506, // center
+ 0x115: 0x45510, // onloadedmetadata
+ 0x116: 0x12804, // link
+ 0x117: 0xdd04, // time
+ 0x118: 0x19f0b, // crossorigin
+ 0x119: 0x3bd07, // onfocus
+ 0x11a: 0x58704, // wrap
+ 0x11b: 0x42204, // icon
+ 0x11d: 0x28105, // video
+ 0x11e: 0x4de05, // class
+ 0x121: 0x5d40e, // onvolumechange
+ 0x122: 0xaa06, // onblur
+ 0x123: 0x2b909, // itemscope
+ 0x124: 0x61105, // style
+ 0x127: 0x41e06, // public
+ 0x129: 0x2320e, // formnovalidate
+ 0x12a: 0x58206, // onshow
+ 0x12c: 0x51706, // onplay
+ 0x12d: 0x3c804, // cite
+ 0x12e: 0x2bc02, // ms
+ 0x12f: 0xdb0c, // ontimeupdate
+ 0x130: 0x10904, // kind
+ 0x131: 0x2470a, // formtarget
+ 0x135: 0x3af07, // onended
+ 0x136: 0x26506, // hidden
+ 0x137: 0x2c01, // s
+ 0x139: 0x2280a, // formmethod
+ 0x13a: 0x3e805, // input
+ 0x13c: 0x50b02, // h6
+ 0x13d: 0xc902, // ol
+ 0x13e: 0x3420b, // oncuechange
+ 0x13f: 0x1e50d, // foreignobject
+ 0x143: 0x4e70e, // onbeforeunload
+ 0x144: 0x2bd05, // scope
+ 0x145: 0x39609, // onemptied
+ 0x146: 0x14b05, // defer
+ 0x147: 0xc103, // xmp
+ 0x148: 0x39f10, // ondurationchange
+ 0x149: 0x1903, // kbd
+ 0x14c: 0x47609, // onmessage
+ 0x14d: 0x60006, // option
+ 0x14e: 0x2eb09, // minlength
+ 0x14f: 0x32807, // checked
+ 0x150: 0xce08, // autoplay
+ 0x152: 0x202, // br
+ 0x153: 0x2360a, // novalidate
+ 0x156: 0x6307, // noembed
+ 0x159: 0x31007, // onclick
+ 0x15a: 0x47f0b, // onmousedown
+ 0x15b: 0x3a708, // onchange
+ 0x15e: 0x3f209, // oninvalid
+ 0x15f: 0x2bd06, // scoped
+ 0x160: 0x18808, // controls
+ 0x161: 0x30b05, // muted
+ 0x162: 0x58d08, // sortable
+ 0x163: 0x51106, // usemap
+ 0x164: 0x1b80a, // figcaption
+ 0x165: 0x35706, // ondrag
+ 0x166: 0x26b04, // high
+ 0x168: 0x3c303, // src
+ 0x169: 0x15706, // poster
+ 0x16b: 0x1670e, // annotation-xml
+ 0x16c: 0x5f704, // step
+ 0x16d: 0x4, // abbr
+ 0x16e: 0x1b06, // dialog
+ 0x170: 0x1202, // li
+ 0x172: 0x3ed02, // mo
+ 0x175: 0x1d803, // for
+ 0x176: 0x1a803, // ins
+ 0x178: 0x55504, // size
+ 0x179: 0x43210, // onlanguagechange
+ 0x17a: 0x8607, // default
+ 0x17b: 0x1a03, // bdi
+ 0x17c: 0x4d30a, // onpagehide
+ 0x17d: 0x6907, // dirname
+ 0x17e: 0x21404, // type
+ 0x17f: 0x1f204, // form
+ 0x181: 0x28509, // oncanplay
+ 0x182: 0x6103, // dfn
+ 0x183: 0x46308, // tabindex
+ 0x186: 0x6502, // em
+ 0x187: 0x27404, // lang
+ 0x189: 0x39108, // dropzone
+ 0x18a: 0x4080a, // onkeypress
+ 0x18b: 0x23c08, // datetime
+ 0x18c: 0x16204, // cols
+ 0x18d: 0x1, // a
+ 0x18e: 0x4420c, // onloadeddata
+ 0x190: 0xa605, // audio
+ 0x192: 0x2e05, // tbody
+ 0x193: 0x22c06, // method
+ 0x195: 0xf404, // loop
+ 0x196: 0x29606, // iframe
+ 0x198: 0x2d504, // head
+ 0x19e: 0x5f108, // manifest
+ 0x19f: 0xb309, // autofocus
+ 0x1a0: 0x14904, // code
+ 0x1a1: 0x55906, // strong
+ 0x1a2: 0x30308, // multiple
+ 0x1a3: 0xc05, // param
+ 0x1a6: 0x21107, // enctype
+ 0x1a7: 0x5b304, // face
+ 0x1a8: 0xfd09, // plaintext
+ 0x1a9: 0x26e02, // h1
+ 0x1aa: 0x59509, // onstalled
+ 0x1ad: 0x3d406, // script
+ 0x1ae: 0x2db06, // spacer
+ 0x1af: 0x55108, // onresize
+ 0x1b0: 0x4a20b, // onmouseover
+ 0x1b1: 0x5cc08, // onunload
+ 0x1b2: 0x56708, // onseeked
+ 0x1b4: 0x2140d, // typemustmatch
+ 0x1b5: 0x1cc06, // figure
+ 0x1b6: 0x4950a, // onmouseout
+ 0x1b7: 0x25e03, // pre
+ 0x1b8: 0x50705, // width
+ 0x1b9: 0x19906, // sorted
+ 0x1bb: 0x5704, // nobr
+ 0x1be: 0x5302, // tt
+ 0x1bf: 0x1105, // align
+ 0x1c0: 0x3e607, // oninput
+ 0x1c3: 0x41807, // onkeyup
+ 0x1c6: 0x1c00c, // onafterprint
+ 0x1c7: 0x210e, // accept-charset
+ 0x1c8: 0x33c06, // itemid
+ 0x1c9: 0x3e809, // inputmode
+ 0x1cb: 0x53306, // strike
+ 0x1cc: 0x5a903, // sub
+ 0x1cd: 0x10505, // track
+ 0x1ce: 0x38605, // start
+ 0x1d0: 0xd608, // basefont
+ 0x1d6: 0x1aa06, // source
+ 0x1d7: 0x18206, // legend
+ 0x1d8: 0x2d405, // thead
+ 0x1da: 0x8c05, // tfoot
+ 0x1dd: 0x1ec06, // object
+ 0x1de: 0x6e05, // media
+ 0x1df: 0x1670a, // annotation
+ 0x1e0: 0x20d0b, // formenctype
+ 0x1e2: 0x3d208, // noscript
+ 0x1e4: 0x55505, // sizes
+ 0x1e5: 0x1fc0c, // autocomplete
+ 0x1e6: 0x9504, // span
+ 0x1e7: 0x9808, // noframes
+ 0x1e8: 0x24b06, // target
+ 0x1e9: 0x38f06, // ondrop
+ 0x1ea: 0x2b306, // applet
+ 0x1ec: 0x5a08, // reversed
+ 0x1f0: 0x2a907, // isindex
+ 0x1f3: 0x27008, // hreflang
+ 0x1f5: 0x2f302, // h5
+ 0x1f6: 0x4f307, // address
+ 0x1fa: 0x2e103, // max
+ 0x1fb: 0xc30b, // placeholder
+ 0x1fc: 0x2f608, // textarea
+ 0x1fe: 0x4ad09, // onmouseup
+ 0x1ff: 0x3800b, // ondragstart
+}
+
+const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
+ "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" +
+ "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" +
+ "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" +
+ "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" +
+ "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" +
+ "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" +
+ "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" +
+ "bjectforeignobjectformactionautocompleteerrorformenctypemust" +
+ "matchallengeformmethodformnovalidatetimeterformtargetheightm" +
+ "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" +
+ "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" +
+ "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" +
+ "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" +
+ "hangeondblclickondragendondragenterondragleaveondragoverondr" +
+ "agstarticleondropzonemptiedondurationchangeonendedonerroronf" +
+ "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" +
+ "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" +
+ "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" +
+ "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" +
+ "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" +
+ "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" +
+ "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" +
+ "ollonseekedonseekingonselectedonshowraponsortableonstalledon" +
+ "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" +
+ "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" +
+ "mmarysupsvgsystemplate"
diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go
new file mode 100644
index 000000000..13bed1599
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/charset.go
@@ -0,0 +1,257 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package charset provides common text encodings for HTML documents.
+//
+// The mapping from encoding labels to encodings is defined at
+// https://encoding.spec.whatwg.org/.
+package charset // import "golang.org/x/net/html/charset"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "mime"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/html"
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/charmap"
+ "golang.org/x/text/encoding/htmlindex"
+ "golang.org/x/text/transform"
+)
+
+// Lookup returns the encoding with the specified label, and its canonical
+// name. It returns nil and the empty string if label is not one of the
+// standard encodings for HTML. Matching is case-insensitive and ignores
+// leading and trailing whitespace. Encoders will use HTML escape sequences for
+// runes that are not supported by the character set.
+func Lookup(label string) (e encoding.Encoding, name string) {
+ e, err := htmlindex.Get(label)
+ if err != nil {
+ return nil, ""
+ }
+ name, _ = htmlindex.Name(e)
+ return &htmlEncoding{e}, name
+}
+
+type htmlEncoding struct{ encoding.Encoding }
+
+func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
+ // HTML requires a non-terminating legacy encoder. We use HTML escapes to
+ // substitute unsupported code points.
+ return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
+}
+
+// DetermineEncoding determines the encoding of an HTML document by examining
+// up to the first 1024 bytes of content and the declared Content-Type.
+//
+// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
+func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
+ if len(content) > 1024 {
+ content = content[:1024]
+ }
+
+ for _, b := range boms {
+ if bytes.HasPrefix(content, b.bom) {
+ e, name = Lookup(b.enc)
+ return e, name, true
+ }
+ }
+
+ if _, params, err := mime.ParseMediaType(contentType); err == nil {
+ if cs, ok := params["charset"]; ok {
+ if e, name = Lookup(cs); e != nil {
+ return e, name, true
+ }
+ }
+ }
+
+ if len(content) > 0 {
+ e, name = prescan(content)
+ if e != nil {
+ return e, name, false
+ }
+ }
+
+ // Try to detect UTF-8.
+ // First eliminate any partial rune at the end.
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
+ b := content[i]
+ if b < 0x80 {
+ break
+ }
+ if utf8.RuneStart(b) {
+ content = content[:i]
+ break
+ }
+ }
+ hasHighBit := false
+ for _, c := range content {
+ if c >= 0x80 {
+ hasHighBit = true
+ break
+ }
+ }
+ if hasHighBit && utf8.Valid(content) {
+ return encoding.Nop, "utf-8", false
+ }
+
+ // TODO: change default depending on user's locale?
+ return charmap.Windows1252, "windows-1252", false
+}
+
+// NewReader returns an io.Reader that converts the content of r to UTF-8.
+// It calls DetermineEncoding to find out what r's encoding is.
+func NewReader(r io.Reader, contentType string) (io.Reader, error) {
+ preview := make([]byte, 1024)
+ n, err := io.ReadFull(r, preview)
+ switch {
+ case err == io.ErrUnexpectedEOF:
+ preview = preview[:n]
+ r = bytes.NewReader(preview)
+ case err != nil:
+ return nil, err
+ default:
+ r = io.MultiReader(bytes.NewReader(preview), r)
+ }
+
+ if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
+ r = transform.NewReader(r, e.NewDecoder())
+ }
+ return r, nil
+}
+
+// NewReaderLabel returns a reader that converts from the specified charset to
+// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
+// returns an error if Lookup returns nil. It is suitable for use as
+// encoding/xml.Decoder's CharsetReader function.
+func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
+ e, _ := Lookup(label)
+ if e == nil {
+ return nil, fmt.Errorf("unsupported charset: %q", label)
+ }
+ return transform.NewReader(input, e.NewDecoder()), nil
+}
+
+func prescan(content []byte) (e encoding.Encoding, name string) {
+ z := html.NewTokenizer(bytes.NewReader(content))
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return nil, ""
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tagName, hasAttr := z.TagName()
+ if !bytes.Equal(tagName, []byte("meta")) {
+ continue
+ }
+ attrList := make(map[string]bool)
+ gotPragma := false
+
+ const (
+ dontKnow = iota
+ doNeedPragma
+ doNotNeedPragma
+ )
+ needPragma := dontKnow
+
+ name = ""
+ e = nil
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ ks := string(key)
+ if attrList[ks] {
+ continue
+ }
+ attrList[ks] = true
+ for i, c := range val {
+ if 'A' <= c && c <= 'Z' {
+ val[i] = c + 0x20
+ }
+ }
+
+ switch ks {
+ case "http-equiv":
+ if bytes.Equal(val, []byte("content-type")) {
+ gotPragma = true
+ }
+
+ case "content":
+ if e == nil {
+ name = fromMetaElement(string(val))
+ if name != "" {
+ e, name = Lookup(name)
+ if e != nil {
+ needPragma = doNeedPragma
+ }
+ }
+ }
+
+ case "charset":
+ e, name = Lookup(string(val))
+ needPragma = doNotNeedPragma
+ }
+ }
+
+ if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
+ continue
+ }
+
+ if strings.HasPrefix(name, "utf-16") {
+ name = "utf-8"
+ e = encoding.Nop
+ }
+
+ if e != nil {
+ return e, name
+ }
+ }
+ }
+}
+
+func fromMetaElement(s string) string {
+ for s != "" {
+ csLoc := strings.Index(s, "charset")
+ if csLoc == -1 {
+ return ""
+ }
+ s = s[csLoc+len("charset"):]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if !strings.HasPrefix(s, "=") {
+ continue
+ }
+ s = s[1:]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if s == "" {
+ return ""
+ }
+ if q := s[0]; q == '"' || q == '\'' {
+ s = s[1:]
+ closeQuote := strings.IndexRune(s, rune(q))
+ if closeQuote == -1 {
+ return ""
+ }
+ return s[:closeQuote]
+ }
+
+ end := strings.IndexAny(s, "; \t\n\f\r")
+ if end == -1 {
+ end = len(s)
+ }
+ return s[:end]
+ }
+ return ""
+}
+
+var boms = []struct {
+ bom []byte
+ enc string
+}{
+ {[]byte{0xfe, 0xff}, "utf-16be"},
+ {[]byte{0xff, 0xfe}, "utf-16le"},
+ {[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
+}
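
The exported surface of this new charset file is small: Lookup, DetermineEncoding, NewReader and NewReaderLabel. A minimal sketch of how a consumer might wire them up, assuming a hypothetical HTTP fetch and an ISO-8859-1 XML snippet (neither is taken from this patch):

	package main

	import (
		"encoding/xml"
		"fmt"
		"io/ioutil"
		"net/http"
		"strings"

		"golang.org/x/net/html/charset"
	)

	func main() {
		// Hypothetical fetch of a page whose encoding is unknown.
		resp, err := http.Get("https://example.com/legacy.html")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		// NewReader sniffs up to 1024 bytes (BOM, <meta>, a UTF-8 validity
		// check, then the windows-1252 fallback) and returns a reader that
		// always yields UTF-8.
		r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(r)
		fmt.Println("decoded bytes:", len(body))

		// NewReaderLabel matches the signature encoding/xml expects for its
		// CharsetReader hook, so non-UTF-8 XML decodes the same way.
		dec := xml.NewDecoder(strings.NewReader(
			`<?xml version="1.0" encoding="ISO-8859-1"?><root/>`))
		dec.CharsetReader = charset.NewReaderLabel
		var v struct{ XMLName xml.Name }
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println("root element:", v.XMLName.Local)
	}
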
diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go
new file mode 100644
index 000000000..52f651ff6
--- /dev/null
+++ b/vendor/golang.org/x/net/html/const.go
@@ -0,0 +1,102 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// Section 12.2.3.2 of the HTML5 specification says "The following elements
+// have varying levels of special parsing rules".
+// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
+var isSpecialElementMap = map[string]bool{
+ "address": true,
+ "applet": true,
+ "area": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "bgsound": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "button": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "embed": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hgroup": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "img": true,
+ "input": true,
+ "isindex": true,
+ "li": true,
+ "link": true,
+ "listing": true,
+ "marquee": true,
+ "menu": true,
+ "meta": true,
+ "nav": true,
+ "noembed": true,
+ "noframes": true,
+ "noscript": true,
+ "object": true,
+ "ol": true,
+ "p": true,
+ "param": true,
+ "plaintext": true,
+ "pre": true,
+ "script": true,
+ "section": true,
+ "select": true,
+ "source": true,
+ "style": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "template": true,
+ "textarea": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+ "wbr": true,
+ "xmp": true,
+}
+
+func isSpecialElement(element *Node) bool {
+ switch element.Namespace {
+ case "", "html":
+ return isSpecialElementMap[element.Data]
+ case "svg":
+ return element.Data == "foreignObject"
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
new file mode 100644
index 000000000..94f496874
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -0,0 +1,106 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package html implements an HTML5-compliant tokenizer and parser.
+
+Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
+caller's responsibility to ensure that r provides UTF-8 encoded HTML.
+
+ z := html.NewTokenizer(r)
+
+Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
+which parses the next token and returns its type, or an error:
+
+ for {
+ tt := z.Next()
+ if tt == html.ErrorToken {
+ // ...
+ return ...
+ }
+ // Process the current token.
+ }
+
+There are two APIs for retrieving the current token. The high-level API is to
+call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
+allow optionally calling Raw after Next but before Token, Text, TagName, or
+TagAttr. In EBNF notation, the valid call sequence per token is:
+
+ Next {Raw} [ Token | Text | TagName {TagAttr} ]
+
+Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are
+lower-cased, and attributes are collected into a []Attribute. For example:
+
+ for {
+ if z.Next() == html.ErrorToken {
+ // Returning io.EOF indicates success.
+ return z.Err()
+ }
+ emitToken(z.Token())
+ }
+
+The low-level API performs fewer allocations and copies, but the contents of
+the []byte values returned by Text, TagName and TagAttr may change on the next
+call to Next. For example, to extract an HTML page's anchor text:
+
+ depth := 0
+ for {
+ tt := z.Next()
+ switch tt {
+ case ErrorToken:
+ return z.Err()
+ case TextToken:
+ if depth > 0 {
+ // emitBytes should copy the []byte it receives,
+ // if it doesn't process it immediately.
+ emitBytes(z.Text())
+ }
+ case StartTagToken, EndTagToken:
+ tn, _ := z.TagName()
+ if len(tn) == 1 && tn[0] == 'a' {
+ if tt == StartTagToken {
+ depth++
+ } else {
+ depth--
+ }
+ }
+ }
+ }
+
+Parsing is done by calling Parse with an io.Reader, which returns the root of
+the parse tree (the document element) as a *Node. It is the caller's
+responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
+example, to process each anchor node in depth-first order:
+
+ doc, err := html.Parse(r)
+ if err != nil {
+ // ...
+ }
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.ElementNode && n.Data == "a" {
+ // Do something with n...
+ }
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ f(doc)
+
+The relevant specifications include:
+https://html.spec.whatwg.org/multipage/syntax.html and
+https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+*/
+package html // import "golang.org/x/net/html"
+
+// The tokenization algorithm implemented by this package is not a line-by-line
+// transliteration of the relatively verbose state-machine in the WHATWG
+// specification. A more direct approach is used instead, where the program
+// counter implies the state, such as whether it is tokenizing a tag or a text
+// node. Specification compliance is verified by checking expected and actual
+// outputs over a test suite rather than aiming for algorithmic fidelity.
+
+// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
+// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go
new file mode 100644
index 000000000..c484e5a94
--- /dev/null
+++ b/vendor/golang.org/x/net/html/doctype.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+// parseDoctype parses the data from a DoctypeToken into a name,
+// public identifier, and system identifier. It returns a Node whose Type
+// is DoctypeNode, whose Data is the name, and which has attributes
+// named "system" and "public" for the two identifiers if they were present.
+// quirks is whether the document should be parsed in "quirks mode".
+func parseDoctype(s string) (n *Node, quirks bool) {
+ n = &Node{Type: DoctypeNode}
+
+ // Find the name.
+ space := strings.IndexAny(s, whitespace)
+ if space == -1 {
+ space = len(s)
+ }
+ n.Data = s[:space]
+ // The comparison to "html" is case-sensitive.
+ if n.Data != "html" {
+ quirks = true
+ }
+ n.Data = strings.ToLower(n.Data)
+ s = strings.TrimLeft(s[space:], whitespace)
+
+ if len(s) < 6 {
+ // It can't start with "PUBLIC" or "SYSTEM".
+ // Ignore the rest of the string.
+ return n, quirks || s != ""
+ }
+
+ key := strings.ToLower(s[:6])
+ s = s[6:]
+ for key == "public" || key == "system" {
+ s = strings.TrimLeft(s, whitespace)
+ if s == "" {
+ break
+ }
+ quote := s[0]
+ if quote != '"' && quote != '\'' {
+ break
+ }
+ s = s[1:]
+ q := strings.IndexRune(s, rune(quote))
+ var id string
+ if q == -1 {
+ id = s
+ s = ""
+ } else {
+ id = s[:q]
+ s = s[q+1:]
+ }
+ n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
+ if key == "public" {
+ key = "system"
+ } else {
+ key = ""
+ }
+ }
+
+ if key != "" || s != "" {
+ quirks = true
+ } else if len(n.Attr) > 0 {
+ if n.Attr[0].Key == "public" {
+ public := strings.ToLower(n.Attr[0].Val)
+ switch public {
+ case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
+ quirks = true
+ default:
+ for _, q := range quirkyIDs {
+ if strings.HasPrefix(public, q) {
+ quirks = true
+ break
+ }
+ }
+ }
+ // The following two public IDs only cause quirks mode if there is no system ID.
+ if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
+ strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
+ quirks = true
+ }
+ }
+ if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
+ strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+ quirks = true
+ }
+ }
+
+ return n, quirks
+}
+
+// quirkyIDs is a list of public doctype identifiers that cause a document
+// to be interpreted in quirks mode. The identifiers should be in lower case.
+var quirkyIDs = []string{
+ "+//silmaril//dtd html pro v0r11 19970101//",
+ "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+ "-//as//dtd html 3.0 aswedit + extensions//",
+ "-//ietf//dtd html 2.0 level 1//",
+ "-//ietf//dtd html 2.0 level 2//",
+ "-//ietf//dtd html 2.0 strict level 1//",
+ "-//ietf//dtd html 2.0 strict level 2//",
+ "-//ietf//dtd html 2.0 strict//",
+ "-//ietf//dtd html 2.0//",
+ "-//ietf//dtd html 2.1e//",
+ "-//ietf//dtd html 3.0//",
+ "-//ietf//dtd html 3.2 final//",
+ "-//ietf//dtd html 3.2//",
+ "-//ietf//dtd html 3//",
+ "-//ietf//dtd html level 0//",
+ "-//ietf//dtd html level 1//",
+ "-//ietf//dtd html level 2//",
+ "-//ietf//dtd html level 3//",
+ "-//ietf//dtd html strict level 0//",
+ "-//ietf//dtd html strict level 1//",
+ "-//ietf//dtd html strict level 2//",
+ "-//ietf//dtd html strict level 3//",
+ "-//ietf//dtd html strict//",
+ "-//ietf//dtd html//",
+ "-//metrius//dtd metrius presentational//",
+ "-//microsoft//dtd internet explorer 2.0 html strict//",
+ "-//microsoft//dtd internet explorer 2.0 html//",
+ "-//microsoft//dtd internet explorer 2.0 tables//",
+ "-//microsoft//dtd internet explorer 3.0 html strict//",
+ "-//microsoft//dtd internet explorer 3.0 html//",
+ "-//microsoft//dtd internet explorer 3.0 tables//",
+ "-//netscape comm. corp.//dtd html//",
+ "-//netscape comm. corp.//dtd strict html//",
+ "-//o'reilly and associates//dtd html 2.0//",
+ "-//o'reilly and associates//dtd html extended 1.0//",
+ "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+ "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+ "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+ "-//spyglass//dtd html 2.0 extended//",
+ "-//sq//dtd html 2.0 hotmetal + extensions//",
+ "-//sun microsystems corp.//dtd hotjava html//",
+ "-//sun microsystems corp.//dtd hotjava strict html//",
+ "-//w3c//dtd html 3 1995-03-24//",
+ "-//w3c//dtd html 3.2 draft//",
+ "-//w3c//dtd html 3.2 final//",
+ "-//w3c//dtd html 3.2//",
+ "-//w3c//dtd html 3.2s draft//",
+ "-//w3c//dtd html 4.0 frameset//",
+ "-//w3c//dtd html 4.0 transitional//",
+ "-//w3c//dtd html experimental 19960712//",
+ "-//w3c//dtd html experimental 970421//",
+ "-//w3c//dtd w3 html//",
+ "-//w3o//dtd w3 html 3.0//",
+ "-//webtechs//dtd mozilla html 2.0//",
+ "-//webtechs//dtd mozilla html//",
+}
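
parseDoctype itself is unexported, but its output is visible through the public parser: the DoctypeNode it builds keeps the (lower-cased) name in Data and the identifiers as "public"/"system" attributes. A small sketch, using one of the legacy public IDs from the quirkyIDs list above (the input document is illustrative):

	package main

	import (
		"fmt"
		"strings"

		"golang.org/x/net/html"
	)

	func main() {
		doc, err := html.Parse(strings.NewReader(
			`<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"><p>hi`))
		if err != nil {
			panic(err)
		}
		// The doctype, when present, is a child of the document node.
		for n := doc.FirstChild; n != nil; n = n.NextSibling {
			if n.Type == html.DoctypeNode {
				fmt.Println("name:", n.Data) // "html"
				for _, a := range n.Attr {
					fmt.Printf("  %s = %q\n", a.Key, a.Val)
				}
			}
		}
	}
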
diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go
new file mode 100644
index 000000000..a50c04c60
--- /dev/null
+++ b/vendor/golang.org/x/net/html/entity.go
@@ -0,0 +1,2253 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+// All entities that do not end with ';' are 6 or fewer bytes long.
+const longestEntityWithoutSemicolon = 6
+
+// entity is a map from HTML entity names to their values. The semicolon matters:
+// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
+// lists both "amp" and "amp;" as two separate entries.
+//
+// Note that the HTML5 list is larger than the HTML4 list at
+// http://www.w3.org/TR/html4/sgml/entities.html
+var entity = map[string]rune{
+ "AElig;": '\U000000C6',
+ "AMP;": '\U00000026',
+ "Aacute;": '\U000000C1',
+ "Abreve;": '\U00000102',
+ "Acirc;": '\U000000C2',
+ "Acy;": '\U00000410',
+ "Afr;": '\U0001D504',
+ "Agrave;": '\U000000C0',
+ "Alpha;": '\U00000391',
+ "Amacr;": '\U00000100',
+ "And;": '\U00002A53',
+ "Aogon;": '\U00000104',
+ "Aopf;": '\U0001D538',
+ "ApplyFunction;": '\U00002061',
+ "Aring;": '\U000000C5',
+ "Ascr;": '\U0001D49C',
+ "Assign;": '\U00002254',
+ "Atilde;": '\U000000C3',
+ "Auml;": '\U000000C4',
+ "Backslash;": '\U00002216',
+ "Barv;": '\U00002AE7',
+ "Barwed;": '\U00002306',
+ "Bcy;": '\U00000411',
+ "Because;": '\U00002235',
+ "Bernoullis;": '\U0000212C',
+ "Beta;": '\U00000392',
+ "Bfr;": '\U0001D505',
+ "Bopf;": '\U0001D539',
+ "Breve;": '\U000002D8',
+ "Bscr;": '\U0000212C',
+ "Bumpeq;": '\U0000224E',
+ "CHcy;": '\U00000427',
+ "COPY;": '\U000000A9',
+ "Cacute;": '\U00000106',
+ "Cap;": '\U000022D2',
+ "CapitalDifferentialD;": '\U00002145',
+ "Cayleys;": '\U0000212D',
+ "Ccaron;": '\U0000010C',
+ "Ccedil;": '\U000000C7',
+ "Ccirc;": '\U00000108',
+ "Cconint;": '\U00002230',
+ "Cdot;": '\U0000010A',
+ "Cedilla;": '\U000000B8',
+ "CenterDot;": '\U000000B7',
+ "Cfr;": '\U0000212D',
+ "Chi;": '\U000003A7',
+ "CircleDot;": '\U00002299',
+ "CircleMinus;": '\U00002296',
+ "CirclePlus;": '\U00002295',
+ "CircleTimes;": '\U00002297',
+ "ClockwiseContourIntegral;": '\U00002232',
+ "CloseCurlyDoubleQuote;": '\U0000201D',
+ "CloseCurlyQuote;": '\U00002019',
+ "Colon;": '\U00002237',
+ "Colone;": '\U00002A74',
+ "Congruent;": '\U00002261',
+ "Conint;": '\U0000222F',
+ "ContourIntegral;": '\U0000222E',
+ "Copf;": '\U00002102',
+ "Coproduct;": '\U00002210',
+ "CounterClockwiseContourIntegral;": '\U00002233',
+ "Cross;": '\U00002A2F',
+ "Cscr;": '\U0001D49E',
+ "Cup;": '\U000022D3',
+ "CupCap;": '\U0000224D',
+ "DD;": '\U00002145',
+ "DDotrahd;": '\U00002911',
+ "DJcy;": '\U00000402',
+ "DScy;": '\U00000405',
+ "DZcy;": '\U0000040F',
+ "Dagger;": '\U00002021',
+ "Darr;": '\U000021A1',
+ "Dashv;": '\U00002AE4',
+ "Dcaron;": '\U0000010E',
+ "Dcy;": '\U00000414',
+ "Del;": '\U00002207',
+ "Delta;": '\U00000394',
+ "Dfr;": '\U0001D507',
+ "DiacriticalAcute;": '\U000000B4',
+ "DiacriticalDot;": '\U000002D9',
+ "DiacriticalDoubleAcute;": '\U000002DD',
+ "DiacriticalGrave;": '\U00000060',
+ "DiacriticalTilde;": '\U000002DC',
+ "Diamond;": '\U000022C4',
+ "DifferentialD;": '\U00002146',
+ "Dopf;": '\U0001D53B',
+ "Dot;": '\U000000A8',
+ "DotDot;": '\U000020DC',
+ "DotEqual;": '\U00002250',
+ "DoubleContourIntegral;": '\U0000222F',
+ "DoubleDot;": '\U000000A8',
+ "DoubleDownArrow;": '\U000021D3',
+ "DoubleLeftArrow;": '\U000021D0',
+ "DoubleLeftRightArrow;": '\U000021D4',
+ "DoubleLeftTee;": '\U00002AE4',
+ "DoubleLongLeftArrow;": '\U000027F8',
+ "DoubleLongLeftRightArrow;": '\U000027FA',
+ "DoubleLongRightArrow;": '\U000027F9',
+ "DoubleRightArrow;": '\U000021D2',
+ "DoubleRightTee;": '\U000022A8',
+ "DoubleUpArrow;": '\U000021D1',
+ "DoubleUpDownArrow;": '\U000021D5',
+ "DoubleVerticalBar;": '\U00002225',
+ "DownArrow;": '\U00002193',
+ "DownArrowBar;": '\U00002913',
+ "DownArrowUpArrow;": '\U000021F5',
+ "DownBreve;": '\U00000311',
+ "DownLeftRightVector;": '\U00002950',
+ "DownLeftTeeVector;": '\U0000295E',
+ "DownLeftVector;": '\U000021BD',
+ "DownLeftVectorBar;": '\U00002956',
+ "DownRightTeeVector;": '\U0000295F',
+ "DownRightVector;": '\U000021C1',
+ "DownRightVectorBar;": '\U00002957',
+ "DownTee;": '\U000022A4',
+ "DownTeeArrow;": '\U000021A7',
+ "Downarrow;": '\U000021D3',
+ "Dscr;": '\U0001D49F',
+ "Dstrok;": '\U00000110',
+ "ENG;": '\U0000014A',
+ "ETH;": '\U000000D0',
+ "Eacute;": '\U000000C9',
+ "Ecaron;": '\U0000011A',
+ "Ecirc;": '\U000000CA',
+ "Ecy;": '\U0000042D',
+ "Edot;": '\U00000116',
+ "Efr;": '\U0001D508',
+ "Egrave;": '\U000000C8',
+ "Element;": '\U00002208',
+ "Emacr;": '\U00000112',
+ "EmptySmallSquare;": '\U000025FB',
+ "EmptyVerySmallSquare;": '\U000025AB',
+ "Eogon;": '\U00000118',
+ "Eopf;": '\U0001D53C',
+ "Epsilon;": '\U00000395',
+ "Equal;": '\U00002A75',
+ "EqualTilde;": '\U00002242',
+ "Equilibrium;": '\U000021CC',
+ "Escr;": '\U00002130',
+ "Esim;": '\U00002A73',
+ "Eta;": '\U00000397',
+ "Euml;": '\U000000CB',
+ "Exists;": '\U00002203',
+ "ExponentialE;": '\U00002147',
+ "Fcy;": '\U00000424',
+ "Ffr;": '\U0001D509',
+ "FilledSmallSquare;": '\U000025FC',
+ "FilledVerySmallSquare;": '\U000025AA',
+ "Fopf;": '\U0001D53D',
+ "ForAll;": '\U00002200',
+ "Fouriertrf;": '\U00002131',
+ "Fscr;": '\U00002131',
+ "GJcy;": '\U00000403',
+ "GT;": '\U0000003E',
+ "Gamma;": '\U00000393',
+ "Gammad;": '\U000003DC',
+ "Gbreve;": '\U0000011E',
+ "Gcedil;": '\U00000122',
+ "Gcirc;": '\U0000011C',
+ "Gcy;": '\U00000413',
+ "Gdot;": '\U00000120',
+ "Gfr;": '\U0001D50A',
+ "Gg;": '\U000022D9',
+ "Gopf;": '\U0001D53E',
+ "GreaterEqual;": '\U00002265',
+ "GreaterEqualLess;": '\U000022DB',
+ "GreaterFullEqual;": '\U00002267',
+ "GreaterGreater;": '\U00002AA2',
+ "GreaterLess;": '\U00002277',
+ "GreaterSlantEqual;": '\U00002A7E',
+ "GreaterTilde;": '\U00002273',
+ "Gscr;": '\U0001D4A2',
+ "Gt;": '\U0000226B',
+ "HARDcy;": '\U0000042A',
+ "Hacek;": '\U000002C7',
+ "Hat;": '\U0000005E',
+ "Hcirc;": '\U00000124',
+ "Hfr;": '\U0000210C',
+ "HilbertSpace;": '\U0000210B',
+ "Hopf;": '\U0000210D',
+ "HorizontalLine;": '\U00002500',
+ "Hscr;": '\U0000210B',
+ "Hstrok;": '\U00000126',
+ "HumpDownHump;": '\U0000224E',
+ "HumpEqual;": '\U0000224F',
+ "IEcy;": '\U00000415',
+ "IJlig;": '\U00000132',
+ "IOcy;": '\U00000401',
+ "Iacute;": '\U000000CD',
+ "Icirc;": '\U000000CE',
+ "Icy;": '\U00000418',
+ "Idot;": '\U00000130',
+ "Ifr;": '\U00002111',
+ "Igrave;": '\U000000CC',
+ "Im;": '\U00002111',
+ "Imacr;": '\U0000012A',
+ "ImaginaryI;": '\U00002148',
+ "Implies;": '\U000021D2',
+ "Int;": '\U0000222C',
+ "Integral;": '\U0000222B',
+ "Intersection;": '\U000022C2',
+ "InvisibleComma;": '\U00002063',
+ "InvisibleTimes;": '\U00002062',
+ "Iogon;": '\U0000012E',
+ "Iopf;": '\U0001D540',
+ "Iota;": '\U00000399',
+ "Iscr;": '\U00002110',
+ "Itilde;": '\U00000128',
+ "Iukcy;": '\U00000406',
+ "Iuml;": '\U000000CF',
+ "Jcirc;": '\U00000134',
+ "Jcy;": '\U00000419',
+ "Jfr;": '\U0001D50D',
+ "Jopf;": '\U0001D541',
+ "Jscr;": '\U0001D4A5',
+ "Jsercy;": '\U00000408',
+ "Jukcy;": '\U00000404',
+ "KHcy;": '\U00000425',
+ "KJcy;": '\U0000040C',
+ "Kappa;": '\U0000039A',
+ "Kcedil;": '\U00000136',
+ "Kcy;": '\U0000041A',
+ "Kfr;": '\U0001D50E',
+ "Kopf;": '\U0001D542',
+ "Kscr;": '\U0001D4A6',
+ "LJcy;": '\U00000409',
+ "LT;": '\U0000003C',
+ "Lacute;": '\U00000139',
+ "Lambda;": '\U0000039B',
+ "Lang;": '\U000027EA',
+ "Laplacetrf;": '\U00002112',
+ "Larr;": '\U0000219E',
+ "Lcaron;": '\U0000013D',
+ "Lcedil;": '\U0000013B',
+ "Lcy;": '\U0000041B',
+ "LeftAngleBracket;": '\U000027E8',
+ "LeftArrow;": '\U00002190',
+ "LeftArrowBar;": '\U000021E4',
+ "LeftArrowRightArrow;": '\U000021C6',
+ "LeftCeiling;": '\U00002308',
+ "LeftDoubleBracket;": '\U000027E6',
+ "LeftDownTeeVector;": '\U00002961',
+ "LeftDownVector;": '\U000021C3',
+ "LeftDownVectorBar;": '\U00002959',
+ "LeftFloor;": '\U0000230A',
+ "LeftRightArrow;": '\U00002194',
+ "LeftRightVector;": '\U0000294E',
+ "LeftTee;": '\U000022A3',
+ "LeftTeeArrow;": '\U000021A4',
+ "LeftTeeVector;": '\U0000295A',
+ "LeftTriangle;": '\U000022B2',
+ "LeftTriangleBar;": '\U000029CF',
+ "LeftTriangleEqual;": '\U000022B4',
+ "LeftUpDownVector;": '\U00002951',
+ "LeftUpTeeVector;": '\U00002960',
+ "LeftUpVector;": '\U000021BF',
+ "LeftUpVectorBar;": '\U00002958',
+ "LeftVector;": '\U000021BC',
+ "LeftVectorBar;": '\U00002952',
+ "Leftarrow;": '\U000021D0',
+ "Leftrightarrow;": '\U000021D4',
+ "LessEqualGreater;": '\U000022DA',
+ "LessFullEqual;": '\U00002266',
+ "LessGreater;": '\U00002276',
+ "LessLess;": '\U00002AA1',
+ "LessSlantEqual;": '\U00002A7D',
+ "LessTilde;": '\U00002272',
+ "Lfr;": '\U0001D50F',
+ "Ll;": '\U000022D8',
+ "Lleftarrow;": '\U000021DA',
+ "Lmidot;": '\U0000013F',
+ "LongLeftArrow;": '\U000027F5',
+ "LongLeftRightArrow;": '\U000027F7',
+ "LongRightArrow;": '\U000027F6',
+ "Longleftarrow;": '\U000027F8',
+ "Longleftrightarrow;": '\U000027FA',
+ "Longrightarrow;": '\U000027F9',
+ "Lopf;": '\U0001D543',
+ "LowerLeftArrow;": '\U00002199',
+ "LowerRightArrow;": '\U00002198',
+ "Lscr;": '\U00002112',
+ "Lsh;": '\U000021B0',
+ "Lstrok;": '\U00000141',
+ "Lt;": '\U0000226A',
+ "Map;": '\U00002905',
+ "Mcy;": '\U0000041C',
+ "MediumSpace;": '\U0000205F',
+ "Mellintrf;": '\U00002133',
+ "Mfr;": '\U0001D510',
+ "MinusPlus;": '\U00002213',
+ "Mopf;": '\U0001D544',
+ "Mscr;": '\U00002133',
+ "Mu;": '\U0000039C',
+ "NJcy;": '\U0000040A',
+ "Nacute;": '\U00000143',
+ "Ncaron;": '\U00000147',
+ "Ncedil;": '\U00000145',
+ "Ncy;": '\U0000041D',
+ "NegativeMediumSpace;": '\U0000200B',
+ "NegativeThickSpace;": '\U0000200B',
+ "NegativeThinSpace;": '\U0000200B',
+ "NegativeVeryThinSpace;": '\U0000200B',
+ "NestedGreaterGreater;": '\U0000226B',
+ "NestedLessLess;": '\U0000226A',
+ "NewLine;": '\U0000000A',
+ "Nfr;": '\U0001D511',
+ "NoBreak;": '\U00002060',
+ "NonBreakingSpace;": '\U000000A0',
+ "Nopf;": '\U00002115',
+ "Not;": '\U00002AEC',
+ "NotCongruent;": '\U00002262',
+ "NotCupCap;": '\U0000226D',
+ "NotDoubleVerticalBar;": '\U00002226',
+ "NotElement;": '\U00002209',
+ "NotEqual;": '\U00002260',
+ "NotExists;": '\U00002204',
+ "NotGreater;": '\U0000226F',
+ "NotGreaterEqual;": '\U00002271',
+ "NotGreaterLess;": '\U00002279',
+ "NotGreaterTilde;": '\U00002275',
+ "NotLeftTriangle;": '\U000022EA',
+ "NotLeftTriangleEqual;": '\U000022EC',
+ "NotLess;": '\U0000226E',
+ "NotLessEqual;": '\U00002270',
+ "NotLessGreater;": '\U00002278',
+ "NotLessTilde;": '\U00002274',
+ "NotPrecedes;": '\U00002280',
+ "NotPrecedesSlantEqual;": '\U000022E0',
+ "NotReverseElement;": '\U0000220C',
+ "NotRightTriangle;": '\U000022EB',
+ "NotRightTriangleEqual;": '\U000022ED',
+ "NotSquareSubsetEqual;": '\U000022E2',
+ "NotSquareSupersetEqual;": '\U000022E3',
+ "NotSubsetEqual;": '\U00002288',
+ "NotSucceeds;": '\U00002281',
+ "NotSucceedsSlantEqual;": '\U000022E1',
+ "NotSupersetEqual;": '\U00002289',
+ "NotTilde;": '\U00002241',
+ "NotTildeEqual;": '\U00002244',
+ "NotTildeFullEqual;": '\U00002247',
+ "NotTildeTilde;": '\U00002249',
+ "NotVerticalBar;": '\U00002224',
+ "Nscr;": '\U0001D4A9',
+ "Ntilde;": '\U000000D1',
+ "Nu;": '\U0000039D',
+ "OElig;": '\U00000152',
+ "Oacute;": '\U000000D3',
+ "Ocirc;": '\U000000D4',
+ "Ocy;": '\U0000041E',
+ "Odblac;": '\U00000150',
+ "Ofr;": '\U0001D512',
+ "Ograve;": '\U000000D2',
+ "Omacr;": '\U0000014C',
+ "Omega;": '\U000003A9',
+ "Omicron;": '\U0000039F',
+ "Oopf;": '\U0001D546',
+ "OpenCurlyDoubleQuote;": '\U0000201C',
+ "OpenCurlyQuote;": '\U00002018',
+ "Or;": '\U00002A54',
+ "Oscr;": '\U0001D4AA',
+ "Oslash;": '\U000000D8',
+ "Otilde;": '\U000000D5',
+ "Otimes;": '\U00002A37',
+ "Ouml;": '\U000000D6',
+ "OverBar;": '\U0000203E',
+ "OverBrace;": '\U000023DE',
+ "OverBracket;": '\U000023B4',
+ "OverParenthesis;": '\U000023DC',
+ "PartialD;": '\U00002202',
+ "Pcy;": '\U0000041F',
+ "Pfr;": '\U0001D513',
+ "Phi;": '\U000003A6',
+ "Pi;": '\U000003A0',
+ "PlusMinus;": '\U000000B1',
+ "Poincareplane;": '\U0000210C',
+ "Popf;": '\U00002119',
+ "Pr;": '\U00002ABB',
+ "Precedes;": '\U0000227A',
+ "PrecedesEqual;": '\U00002AAF',
+ "PrecedesSlantEqual;": '\U0000227C',
+ "PrecedesTilde;": '\U0000227E',
+ "Prime;": '\U00002033',
+ "Product;": '\U0000220F',
+ "Proportion;": '\U00002237',
+ "Proportional;": '\U0000221D',
+ "Pscr;": '\U0001D4AB',
+ "Psi;": '\U000003A8',
+ "QUOT;": '\U00000022',
+ "Qfr;": '\U0001D514',
+ "Qopf;": '\U0000211A',
+ "Qscr;": '\U0001D4AC',
+ "RBarr;": '\U00002910',
+ "REG;": '\U000000AE',
+ "Racute;": '\U00000154',
+ "Rang;": '\U000027EB',
+ "Rarr;": '\U000021A0',
+ "Rarrtl;": '\U00002916',
+ "Rcaron;": '\U00000158',
+ "Rcedil;": '\U00000156',
+ "Rcy;": '\U00000420',
+ "Re;": '\U0000211C',
+ "ReverseElement;": '\U0000220B',
+ "ReverseEquilibrium;": '\U000021CB',
+ "ReverseUpEquilibrium;": '\U0000296F',
+ "Rfr;": '\U0000211C',
+ "Rho;": '\U000003A1',
+ "RightAngleBracket;": '\U000027E9',
+ "RightArrow;": '\U00002192',
+ "RightArrowBar;": '\U000021E5',
+ "RightArrowLeftArrow;": '\U000021C4',
+ "RightCeiling;": '\U00002309',
+ "RightDoubleBracket;": '\U000027E7',
+ "RightDownTeeVector;": '\U0000295D',
+ "RightDownVector;": '\U000021C2',
+ "RightDownVectorBar;": '\U00002955',
+ "RightFloor;": '\U0000230B',
+ "RightTee;": '\U000022A2',
+ "RightTeeArrow;": '\U000021A6',
+ "RightTeeVector;": '\U0000295B',
+ "RightTriangle;": '\U000022B3',
+ "RightTriangleBar;": '\U000029D0',
+ "RightTriangleEqual;": '\U000022B5',
+ "RightUpDownVector;": '\U0000294F',
+ "RightUpTeeVector;": '\U0000295C',
+ "RightUpVector;": '\U000021BE',
+ "RightUpVectorBar;": '\U00002954',
+ "RightVector;": '\U000021C0',
+ "RightVectorBar;": '\U00002953',
+ "Rightarrow;": '\U000021D2',
+ "Ropf;": '\U0000211D',
+ "RoundImplies;": '\U00002970',
+ "Rrightarrow;": '\U000021DB',
+ "Rscr;": '\U0000211B',
+ "Rsh;": '\U000021B1',
+ "RuleDelayed;": '\U000029F4',
+ "SHCHcy;": '\U00000429',
+ "SHcy;": '\U00000428',
+ "SOFTcy;": '\U0000042C',
+ "Sacute;": '\U0000015A',
+ "Sc;": '\U00002ABC',
+ "Scaron;": '\U00000160',
+ "Scedil;": '\U0000015E',
+ "Scirc;": '\U0000015C',
+ "Scy;": '\U00000421',
+ "Sfr;": '\U0001D516',
+ "ShortDownArrow;": '\U00002193',
+ "ShortLeftArrow;": '\U00002190',
+ "ShortRightArrow;": '\U00002192',
+ "ShortUpArrow;": '\U00002191',
+ "Sigma;": '\U000003A3',
+ "SmallCircle;": '\U00002218',
+ "Sopf;": '\U0001D54A',
+ "Sqrt;": '\U0000221A',
+ "Square;": '\U000025A1',
+ "SquareIntersection;": '\U00002293',
+ "SquareSubset;": '\U0000228F',
+ "SquareSubsetEqual;": '\U00002291',
+ "SquareSuperset;": '\U00002290',
+ "SquareSupersetEqual;": '\U00002292',
+ "SquareUnion;": '\U00002294',
+ "Sscr;": '\U0001D4AE',
+ "Star;": '\U000022C6',
+ "Sub;": '\U000022D0',
+ "Subset;": '\U000022D0',
+ "SubsetEqual;": '\U00002286',
+ "Succeeds;": '\U0000227B',
+ "SucceedsEqual;": '\U00002AB0',
+ "SucceedsSlantEqual;": '\U0000227D',
+ "SucceedsTilde;": '\U0000227F',
+ "SuchThat;": '\U0000220B',
+ "Sum;": '\U00002211',
+ "Sup;": '\U000022D1',
+ "Superset;": '\U00002283',
+ "SupersetEqual;": '\U00002287',
+ "Supset;": '\U000022D1',
+ "THORN;": '\U000000DE',
+ "TRADE;": '\U00002122',
+ "TSHcy;": '\U0000040B',
+ "TScy;": '\U00000426',
+ "Tab;": '\U00000009',
+ "Tau;": '\U000003A4',
+ "Tcaron;": '\U00000164',
+ "Tcedil;": '\U00000162',
+ "Tcy;": '\U00000422',
+ "Tfr;": '\U0001D517',
+ "Therefore;": '\U00002234',
+ "Theta;": '\U00000398',
+ "ThinSpace;": '\U00002009',
+ "Tilde;": '\U0000223C',
+ "TildeEqual;": '\U00002243',
+ "TildeFullEqual;": '\U00002245',
+ "TildeTilde;": '\U00002248',
+ "Topf;": '\U0001D54B',
+ "TripleDot;": '\U000020DB',
+ "Tscr;": '\U0001D4AF',
+ "Tstrok;": '\U00000166',
+ "Uacute;": '\U000000DA',
+ "Uarr;": '\U0000219F',
+ "Uarrocir;": '\U00002949',
+ "Ubrcy;": '\U0000040E',
+ "Ubreve;": '\U0000016C',
+ "Ucirc;": '\U000000DB',
+ "Ucy;": '\U00000423',
+ "Udblac;": '\U00000170',
+ "Ufr;": '\U0001D518',
+ "Ugrave;": '\U000000D9',
+ "Umacr;": '\U0000016A',
+ "UnderBar;": '\U0000005F',
+ "UnderBrace;": '\U000023DF',
+ "UnderBracket;": '\U000023B5',
+ "UnderParenthesis;": '\U000023DD',
+ "Union;": '\U000022C3',
+ "UnionPlus;": '\U0000228E',
+ "Uogon;": '\U00000172',
+ "Uopf;": '\U0001D54C',
+ "UpArrow;": '\U00002191',
+ "UpArrowBar;": '\U00002912',
+ "UpArrowDownArrow;": '\U000021C5',
+ "UpDownArrow;": '\U00002195',
+ "UpEquilibrium;": '\U0000296E',
+ "UpTee;": '\U000022A5',
+ "UpTeeArrow;": '\U000021A5',
+ "Uparrow;": '\U000021D1',
+ "Updownarrow;": '\U000021D5',
+ "UpperLeftArrow;": '\U00002196',
+ "UpperRightArrow;": '\U00002197',
+ "Upsi;": '\U000003D2',
+ "Upsilon;": '\U000003A5',
+ "Uring;": '\U0000016E',
+ "Uscr;": '\U0001D4B0',
+ "Utilde;": '\U00000168',
+ "Uuml;": '\U000000DC',
+ "VDash;": '\U000022AB',
+ "Vbar;": '\U00002AEB',
+ "Vcy;": '\U00000412',
+ "Vdash;": '\U000022A9',
+ "Vdashl;": '\U00002AE6',
+ "Vee;": '\U000022C1',
+ "Verbar;": '\U00002016',
+ "Vert;": '\U00002016',
+ "VerticalBar;": '\U00002223',
+ "VerticalLine;": '\U0000007C',
+ "VerticalSeparator;": '\U00002758',
+ "VerticalTilde;": '\U00002240',
+ "VeryThinSpace;": '\U0000200A',
+ "Vfr;": '\U0001D519',
+ "Vopf;": '\U0001D54D',
+ "Vscr;": '\U0001D4B1',
+ "Vvdash;": '\U000022AA',
+ "Wcirc;": '\U00000174',
+ "Wedge;": '\U000022C0',
+ "Wfr;": '\U0001D51A',
+ "Wopf;": '\U0001D54E',
+ "Wscr;": '\U0001D4B2',
+ "Xfr;": '\U0001D51B',
+ "Xi;": '\U0000039E',
+ "Xopf;": '\U0001D54F',
+ "Xscr;": '\U0001D4B3',
+ "YAcy;": '\U0000042F',
+ "YIcy;": '\U00000407',
+ "YUcy;": '\U0000042E',
+ "Yacute;": '\U000000DD',
+ "Ycirc;": '\U00000176',
+ "Ycy;": '\U0000042B',
+ "Yfr;": '\U0001D51C',
+ "Yopf;": '\U0001D550',
+ "Yscr;": '\U0001D4B4',
+ "Yuml;": '\U00000178',
+ "ZHcy;": '\U00000416',
+ "Zacute;": '\U00000179',
+ "Zcaron;": '\U0000017D',
+ "Zcy;": '\U00000417',
+ "Zdot;": '\U0000017B',
+ "ZeroWidthSpace;": '\U0000200B',
+ "Zeta;": '\U00000396',
+ "Zfr;": '\U00002128',
+ "Zopf;": '\U00002124',
+ "Zscr;": '\U0001D4B5',
+ "aacute;": '\U000000E1',
+ "abreve;": '\U00000103',
+ "ac;": '\U0000223E',
+ "acd;": '\U0000223F',
+ "acirc;": '\U000000E2',
+ "acute;": '\U000000B4',
+ "acy;": '\U00000430',
+ "aelig;": '\U000000E6',
+ "af;": '\U00002061',
+ "afr;": '\U0001D51E',
+ "agrave;": '\U000000E0',
+ "alefsym;": '\U00002135',
+ "aleph;": '\U00002135',
+ "alpha;": '\U000003B1',
+ "amacr;": '\U00000101',
+ "amalg;": '\U00002A3F',
+ "amp;": '\U00000026',
+ "and;": '\U00002227',
+ "andand;": '\U00002A55',
+ "andd;": '\U00002A5C',
+ "andslope;": '\U00002A58',
+ "andv;": '\U00002A5A',
+ "ang;": '\U00002220',
+ "ange;": '\U000029A4',
+ "angle;": '\U00002220',
+ "angmsd;": '\U00002221',
+ "angmsdaa;": '\U000029A8',
+ "angmsdab;": '\U000029A9',
+ "angmsdac;": '\U000029AA',
+ "angmsdad;": '\U000029AB',
+ "angmsdae;": '\U000029AC',
+ "angmsdaf;": '\U000029AD',
+ "angmsdag;": '\U000029AE',
+ "angmsdah;": '\U000029AF',
+ "angrt;": '\U0000221F',
+ "angrtvb;": '\U000022BE',
+ "angrtvbd;": '\U0000299D',
+ "angsph;": '\U00002222',
+ "angst;": '\U000000C5',
+ "angzarr;": '\U0000237C',
+ "aogon;": '\U00000105',
+ "aopf;": '\U0001D552',
+ "ap;": '\U00002248',
+ "apE;": '\U00002A70',
+ "apacir;": '\U00002A6F',
+ "ape;": '\U0000224A',
+ "apid;": '\U0000224B',
+ "apos;": '\U00000027',
+ "approx;": '\U00002248',
+ "approxeq;": '\U0000224A',
+ "aring;": '\U000000E5',
+ "ascr;": '\U0001D4B6',
+ "ast;": '\U0000002A',
+ "asymp;": '\U00002248',
+ "asympeq;": '\U0000224D',
+ "atilde;": '\U000000E3',
+ "auml;": '\U000000E4',
+ "awconint;": '\U00002233',
+ "awint;": '\U00002A11',
+ "bNot;": '\U00002AED',
+ "backcong;": '\U0000224C',
+ "backepsilon;": '\U000003F6',
+ "backprime;": '\U00002035',
+ "backsim;": '\U0000223D',
+ "backsimeq;": '\U000022CD',
+ "barvee;": '\U000022BD',
+ "barwed;": '\U00002305',
+ "barwedge;": '\U00002305',
+ "bbrk;": '\U000023B5',
+ "bbrktbrk;": '\U000023B6',
+ "bcong;": '\U0000224C',
+ "bcy;": '\U00000431',
+ "bdquo;": '\U0000201E',
+ "becaus;": '\U00002235',
+ "because;": '\U00002235',
+ "bemptyv;": '\U000029B0',
+ "bepsi;": '\U000003F6',
+ "bernou;": '\U0000212C',
+ "beta;": '\U000003B2',
+ "beth;": '\U00002136',
+ "between;": '\U0000226C',
+ "bfr;": '\U0001D51F',
+ "bigcap;": '\U000022C2',
+ "bigcirc;": '\U000025EF',
+ "bigcup;": '\U000022C3',
+ "bigodot;": '\U00002A00',
+ "bigoplus;": '\U00002A01',
+ "bigotimes;": '\U00002A02',
+ "bigsqcup;": '\U00002A06',
+ "bigstar;": '\U00002605',
+ "bigtriangledown;": '\U000025BD',
+ "bigtriangleup;": '\U000025B3',
+ "biguplus;": '\U00002A04',
+ "bigvee;": '\U000022C1',
+ "bigwedge;": '\U000022C0',
+ "bkarow;": '\U0000290D',
+ "blacklozenge;": '\U000029EB',
+ "blacksquare;": '\U000025AA',
+ "blacktriangle;": '\U000025B4',
+ "blacktriangledown;": '\U000025BE',
+ "blacktriangleleft;": '\U000025C2',
+ "blacktriangleright;": '\U000025B8',
+ "blank;": '\U00002423',
+ "blk12;": '\U00002592',
+ "blk14;": '\U00002591',
+ "blk34;": '\U00002593',
+ "block;": '\U00002588',
+ "bnot;": '\U00002310',
+ "bopf;": '\U0001D553',
+ "bot;": '\U000022A5',
+ "bottom;": '\U000022A5',
+ "bowtie;": '\U000022C8',
+ "boxDL;": '\U00002557',
+ "boxDR;": '\U00002554',
+ "boxDl;": '\U00002556',
+ "boxDr;": '\U00002553',
+ "boxH;": '\U00002550',
+ "boxHD;": '\U00002566',
+ "boxHU;": '\U00002569',
+ "boxHd;": '\U00002564',
+ "boxHu;": '\U00002567',
+ "boxUL;": '\U0000255D',
+ "boxUR;": '\U0000255A',
+ "boxUl;": '\U0000255C',
+ "boxUr;": '\U00002559',
+ "boxV;": '\U00002551',
+ "boxVH;": '\U0000256C',
+ "boxVL;": '\U00002563',
+ "boxVR;": '\U00002560',
+ "boxVh;": '\U0000256B',
+ "boxVl;": '\U00002562',
+ "boxVr;": '\U0000255F',
+ "boxbox;": '\U000029C9',
+ "boxdL;": '\U00002555',
+ "boxdR;": '\U00002552',
+ "boxdl;": '\U00002510',
+ "boxdr;": '\U0000250C',
+ "boxh;": '\U00002500',
+ "boxhD;": '\U00002565',
+ "boxhU;": '\U00002568',
+ "boxhd;": '\U0000252C',
+ "boxhu;": '\U00002534',
+ "boxminus;": '\U0000229F',
+ "boxplus;": '\U0000229E',
+ "boxtimes;": '\U000022A0',
+ "boxuL;": '\U0000255B',
+ "boxuR;": '\U00002558',
+ "boxul;": '\U00002518',
+ "boxur;": '\U00002514',
+ "boxv;": '\U00002502',
+ "boxvH;": '\U0000256A',
+ "boxvL;": '\U00002561',
+ "boxvR;": '\U0000255E',
+ "boxvh;": '\U0000253C',
+ "boxvl;": '\U00002524',
+ "boxvr;": '\U0000251C',
+ "bprime;": '\U00002035',
+ "breve;": '\U000002D8',
+ "brvbar;": '\U000000A6',
+ "bscr;": '\U0001D4B7',
+ "bsemi;": '\U0000204F',
+ "bsim;": '\U0000223D',
+ "bsime;": '\U000022CD',
+ "bsol;": '\U0000005C',
+ "bsolb;": '\U000029C5',
+ "bsolhsub;": '\U000027C8',
+ "bull;": '\U00002022',
+ "bullet;": '\U00002022',
+ "bump;": '\U0000224E',
+ "bumpE;": '\U00002AAE',
+ "bumpe;": '\U0000224F',
+ "bumpeq;": '\U0000224F',
+ "cacute;": '\U00000107',
+ "cap;": '\U00002229',
+ "capand;": '\U00002A44',
+ "capbrcup;": '\U00002A49',
+ "capcap;": '\U00002A4B',
+ "capcup;": '\U00002A47',
+ "capdot;": '\U00002A40',
+ "caret;": '\U00002041',
+ "caron;": '\U000002C7',
+ "ccaps;": '\U00002A4D',
+ "ccaron;": '\U0000010D',
+ "ccedil;": '\U000000E7',
+ "ccirc;": '\U00000109',
+ "ccups;": '\U00002A4C',
+ "ccupssm;": '\U00002A50',
+ "cdot;": '\U0000010B',
+ "cedil;": '\U000000B8',
+ "cemptyv;": '\U000029B2',
+ "cent;": '\U000000A2',
+ "centerdot;": '\U000000B7',
+ "cfr;": '\U0001D520',
+ "chcy;": '\U00000447',
+ "check;": '\U00002713',
+ "checkmark;": '\U00002713',
+ "chi;": '\U000003C7',
+ "cir;": '\U000025CB',
+ "cirE;": '\U000029C3',
+ "circ;": '\U000002C6',
+ "circeq;": '\U00002257',
+ "circlearrowleft;": '\U000021BA',
+ "circlearrowright;": '\U000021BB',
+ "circledR;": '\U000000AE',
+ "circledS;": '\U000024C8',
+ "circledast;": '\U0000229B',
+ "circledcirc;": '\U0000229A',
+ "circleddash;": '\U0000229D',
+ "cire;": '\U00002257',
+ "cirfnint;": '\U00002A10',
+ "cirmid;": '\U00002AEF',
+ "cirscir;": '\U000029C2',
+ "clubs;": '\U00002663',
+ "clubsuit;": '\U00002663',
+ "colon;": '\U0000003A',
+ "colone;": '\U00002254',
+ "coloneq;": '\U00002254',
+ "comma;": '\U0000002C',
+ "commat;": '\U00000040',
+ "comp;": '\U00002201',
+ "compfn;": '\U00002218',
+ "complement;": '\U00002201',
+ "complexes;": '\U00002102',
+ "cong;": '\U00002245',
+ "congdot;": '\U00002A6D',
+ "conint;": '\U0000222E',
+ "copf;": '\U0001D554',
+ "coprod;": '\U00002210',
+ "copy;": '\U000000A9',
+ "copysr;": '\U00002117',
+ "crarr;": '\U000021B5',
+ "cross;": '\U00002717',
+ "cscr;": '\U0001D4B8',
+ "csub;": '\U00002ACF',
+ "csube;": '\U00002AD1',
+ "csup;": '\U00002AD0',
+ "csupe;": '\U00002AD2',
+ "ctdot;": '\U000022EF',
+ "cudarrl;": '\U00002938',
+ "cudarrr;": '\U00002935',
+ "cuepr;": '\U000022DE',
+ "cuesc;": '\U000022DF',
+ "cularr;": '\U000021B6',
+ "cularrp;": '\U0000293D',
+ "cup;": '\U0000222A',
+ "cupbrcap;": '\U00002A48',
+ "cupcap;": '\U00002A46',
+ "cupcup;": '\U00002A4A',
+ "cupdot;": '\U0000228D',
+ "cupor;": '\U00002A45',
+ "curarr;": '\U000021B7',
+ "curarrm;": '\U0000293C',
+ "curlyeqprec;": '\U000022DE',
+ "curlyeqsucc;": '\U000022DF',
+ "curlyvee;": '\U000022CE',
+ "curlywedge;": '\U000022CF',
+ "curren;": '\U000000A4',
+ "curvearrowleft;": '\U000021B6',
+ "curvearrowright;": '\U000021B7',
+ "cuvee;": '\U000022CE',
+ "cuwed;": '\U000022CF',
+ "cwconint;": '\U00002232',
+ "cwint;": '\U00002231',
+ "cylcty;": '\U0000232D',
+ "dArr;": '\U000021D3',
+ "dHar;": '\U00002965',
+ "dagger;": '\U00002020',
+ "daleth;": '\U00002138',
+ "darr;": '\U00002193',
+ "dash;": '\U00002010',
+ "dashv;": '\U000022A3',
+ "dbkarow;": '\U0000290F',
+ "dblac;": '\U000002DD',
+ "dcaron;": '\U0000010F',
+ "dcy;": '\U00000434',
+ "dd;": '\U00002146',
+ "ddagger;": '\U00002021',
+ "ddarr;": '\U000021CA',
+ "ddotseq;": '\U00002A77',
+ "deg;": '\U000000B0',
+ "delta;": '\U000003B4',
+ "demptyv;": '\U000029B1',
+ "dfisht;": '\U0000297F',
+ "dfr;": '\U0001D521',
+ "dharl;": '\U000021C3',
+ "dharr;": '\U000021C2',
+ "diam;": '\U000022C4',
+ "diamond;": '\U000022C4',
+ "diamondsuit;": '\U00002666',
+ "diams;": '\U00002666',
+ "die;": '\U000000A8',
+ "digamma;": '\U000003DD',
+ "disin;": '\U000022F2',
+ "div;": '\U000000F7',
+ "divide;": '\U000000F7',
+ "divideontimes;": '\U000022C7',
+ "divonx;": '\U000022C7',
+ "djcy;": '\U00000452',
+ "dlcorn;": '\U0000231E',
+ "dlcrop;": '\U0000230D',
+ "dollar;": '\U00000024',
+ "dopf;": '\U0001D555',
+ "dot;": '\U000002D9',
+ "doteq;": '\U00002250',
+ "doteqdot;": '\U00002251',
+ "dotminus;": '\U00002238',
+ "dotplus;": '\U00002214',
+ "dotsquare;": '\U000022A1',
+ "doublebarwedge;": '\U00002306',
+ "downarrow;": '\U00002193',
+ "downdownarrows;": '\U000021CA',
+ "downharpoonleft;": '\U000021C3',
+ "downharpoonright;": '\U000021C2',
+ "drbkarow;": '\U00002910',
+ "drcorn;": '\U0000231F',
+ "drcrop;": '\U0000230C',
+ "dscr;": '\U0001D4B9',
+ "dscy;": '\U00000455',
+ "dsol;": '\U000029F6',
+ "dstrok;": '\U00000111',
+ "dtdot;": '\U000022F1',
+ "dtri;": '\U000025BF',
+ "dtrif;": '\U000025BE',
+ "duarr;": '\U000021F5',
+ "duhar;": '\U0000296F',
+ "dwangle;": '\U000029A6',
+ "dzcy;": '\U0000045F',
+ "dzigrarr;": '\U000027FF',
+ "eDDot;": '\U00002A77',
+ "eDot;": '\U00002251',
+ "eacute;": '\U000000E9',
+ "easter;": '\U00002A6E',
+ "ecaron;": '\U0000011B',
+ "ecir;": '\U00002256',
+ "ecirc;": '\U000000EA',
+ "ecolon;": '\U00002255',
+ "ecy;": '\U0000044D',
+ "edot;": '\U00000117',
+ "ee;": '\U00002147',
+ "efDot;": '\U00002252',
+ "efr;": '\U0001D522',
+ "eg;": '\U00002A9A',
+ "egrave;": '\U000000E8',
+ "egs;": '\U00002A96',
+ "egsdot;": '\U00002A98',
+ "el;": '\U00002A99',
+ "elinters;": '\U000023E7',
+ "ell;": '\U00002113',
+ "els;": '\U00002A95',
+ "elsdot;": '\U00002A97',
+ "emacr;": '\U00000113',
+ "empty;": '\U00002205',
+ "emptyset;": '\U00002205',
+ "emptyv;": '\U00002205',
+ "emsp;": '\U00002003',
+ "emsp13;": '\U00002004',
+ "emsp14;": '\U00002005',
+ "eng;": '\U0000014B',
+ "ensp;": '\U00002002',
+ "eogon;": '\U00000119',
+ "eopf;": '\U0001D556',
+ "epar;": '\U000022D5',
+ "eparsl;": '\U000029E3',
+ "eplus;": '\U00002A71',
+ "epsi;": '\U000003B5',
+ "epsilon;": '\U000003B5',
+ "epsiv;": '\U000003F5',
+ "eqcirc;": '\U00002256',
+ "eqcolon;": '\U00002255',
+ "eqsim;": '\U00002242',
+ "eqslantgtr;": '\U00002A96',
+ "eqslantless;": '\U00002A95',
+ "equals;": '\U0000003D',
+ "equest;": '\U0000225F',
+ "equiv;": '\U00002261',
+ "equivDD;": '\U00002A78',
+ "eqvparsl;": '\U000029E5',
+ "erDot;": '\U00002253',
+ "erarr;": '\U00002971',
+ "escr;": '\U0000212F',
+ "esdot;": '\U00002250',
+ "esim;": '\U00002242',
+ "eta;": '\U000003B7',
+ "eth;": '\U000000F0',
+ "euml;": '\U000000EB',
+ "euro;": '\U000020AC',
+ "excl;": '\U00000021',
+ "exist;": '\U00002203',
+ "expectation;": '\U00002130',
+ "exponentiale;": '\U00002147',
+ "fallingdotseq;": '\U00002252',
+ "fcy;": '\U00000444',
+ "female;": '\U00002640',
+ "ffilig;": '\U0000FB03',
+ "fflig;": '\U0000FB00',
+ "ffllig;": '\U0000FB04',
+ "ffr;": '\U0001D523',
+ "filig;": '\U0000FB01',
+ "flat;": '\U0000266D',
+ "fllig;": '\U0000FB02',
+ "fltns;": '\U000025B1',
+ "fnof;": '\U00000192',
+ "fopf;": '\U0001D557',
+ "forall;": '\U00002200',
+ "fork;": '\U000022D4',
+ "forkv;": '\U00002AD9',
+ "fpartint;": '\U00002A0D',
+ "frac12;": '\U000000BD',
+ "frac13;": '\U00002153',
+ "frac14;": '\U000000BC',
+ "frac15;": '\U00002155',
+ "frac16;": '\U00002159',
+ "frac18;": '\U0000215B',
+ "frac23;": '\U00002154',
+ "frac25;": '\U00002156',
+ "frac34;": '\U000000BE',
+ "frac35;": '\U00002157',
+ "frac38;": '\U0000215C',
+ "frac45;": '\U00002158',
+ "frac56;": '\U0000215A',
+ "frac58;": '\U0000215D',
+ "frac78;": '\U0000215E',
+ "frasl;": '\U00002044',
+ "frown;": '\U00002322',
+ "fscr;": '\U0001D4BB',
+ "gE;": '\U00002267',
+ "gEl;": '\U00002A8C',
+ "gacute;": '\U000001F5',
+ "gamma;": '\U000003B3',
+ "gammad;": '\U000003DD',
+ "gap;": '\U00002A86',
+ "gbreve;": '\U0000011F',
+ "gcirc;": '\U0000011D',
+ "gcy;": '\U00000433',
+ "gdot;": '\U00000121',
+ "ge;": '\U00002265',
+ "gel;": '\U000022DB',
+ "geq;": '\U00002265',
+ "geqq;": '\U00002267',
+ "geqslant;": '\U00002A7E',
+ "ges;": '\U00002A7E',
+ "gescc;": '\U00002AA9',
+ "gesdot;": '\U00002A80',
+ "gesdoto;": '\U00002A82',
+ "gesdotol;": '\U00002A84',
+ "gesles;": '\U00002A94',
+ "gfr;": '\U0001D524',
+ "gg;": '\U0000226B',
+ "ggg;": '\U000022D9',
+ "gimel;": '\U00002137',
+ "gjcy;": '\U00000453',
+ "gl;": '\U00002277',
+ "glE;": '\U00002A92',
+ "gla;": '\U00002AA5',
+ "glj;": '\U00002AA4',
+ "gnE;": '\U00002269',
+ "gnap;": '\U00002A8A',
+ "gnapprox;": '\U00002A8A',
+ "gne;": '\U00002A88',
+ "gneq;": '\U00002A88',
+ "gneqq;": '\U00002269',
+ "gnsim;": '\U000022E7',
+ "gopf;": '\U0001D558',
+ "grave;": '\U00000060',
+ "gscr;": '\U0000210A',
+ "gsim;": '\U00002273',
+ "gsime;": '\U00002A8E',
+ "gsiml;": '\U00002A90',
+ "gt;": '\U0000003E',
+ "gtcc;": '\U00002AA7',
+ "gtcir;": '\U00002A7A',
+ "gtdot;": '\U000022D7',
+ "gtlPar;": '\U00002995',
+ "gtquest;": '\U00002A7C',
+ "gtrapprox;": '\U00002A86',
+ "gtrarr;": '\U00002978',
+ "gtrdot;": '\U000022D7',
+ "gtreqless;": '\U000022DB',
+ "gtreqqless;": '\U00002A8C',
+ "gtrless;": '\U00002277',
+ "gtrsim;": '\U00002273',
+ "hArr;": '\U000021D4',
+ "hairsp;": '\U0000200A',
+ "half;": '\U000000BD',
+ "hamilt;": '\U0000210B',
+ "hardcy;": '\U0000044A',
+ "harr;": '\U00002194',
+ "harrcir;": '\U00002948',
+ "harrw;": '\U000021AD',
+ "hbar;": '\U0000210F',
+ "hcirc;": '\U00000125',
+ "hearts;": '\U00002665',
+ "heartsuit;": '\U00002665',
+ "hellip;": '\U00002026',
+ "hercon;": '\U000022B9',
+ "hfr;": '\U0001D525',
+ "hksearow;": '\U00002925',
+ "hkswarow;": '\U00002926',
+ "hoarr;": '\U000021FF',
+ "homtht;": '\U0000223B',
+ "hookleftarrow;": '\U000021A9',
+ "hookrightarrow;": '\U000021AA',
+ "hopf;": '\U0001D559',
+ "horbar;": '\U00002015',
+ "hscr;": '\U0001D4BD',
+ "hslash;": '\U0000210F',
+ "hstrok;": '\U00000127',
+ "hybull;": '\U00002043',
+ "hyphen;": '\U00002010',
+ "iacute;": '\U000000ED',
+ "ic;": '\U00002063',
+ "icirc;": '\U000000EE',
+ "icy;": '\U00000438',
+ "iecy;": '\U00000435',
+ "iexcl;": '\U000000A1',
+ "iff;": '\U000021D4',
+ "ifr;": '\U0001D526',
+ "igrave;": '\U000000EC',
+ "ii;": '\U00002148',
+ "iiiint;": '\U00002A0C',
+ "iiint;": '\U0000222D',
+ "iinfin;": '\U000029DC',
+ "iiota;": '\U00002129',
+ "ijlig;": '\U00000133',
+ "imacr;": '\U0000012B',
+ "image;": '\U00002111',
+ "imagline;": '\U00002110',
+ "imagpart;": '\U00002111',
+ "imath;": '\U00000131',
+ "imof;": '\U000022B7',
+ "imped;": '\U000001B5',
+ "in;": '\U00002208',
+ "incare;": '\U00002105',
+ "infin;": '\U0000221E',
+ "infintie;": '\U000029DD',
+ "inodot;": '\U00000131',
+ "int;": '\U0000222B',
+ "intcal;": '\U000022BA',
+ "integers;": '\U00002124',
+ "intercal;": '\U000022BA',
+ "intlarhk;": '\U00002A17',
+ "intprod;": '\U00002A3C',
+ "iocy;": '\U00000451',
+ "iogon;": '\U0000012F',
+ "iopf;": '\U0001D55A',
+ "iota;": '\U000003B9',
+ "iprod;": '\U00002A3C',
+ "iquest;": '\U000000BF',
+ "iscr;": '\U0001D4BE',
+ "isin;": '\U00002208',
+ "isinE;": '\U000022F9',
+ "isindot;": '\U000022F5',
+ "isins;": '\U000022F4',
+ "isinsv;": '\U000022F3',
+ "isinv;": '\U00002208',
+ "it;": '\U00002062',
+ "itilde;": '\U00000129',
+ "iukcy;": '\U00000456',
+ "iuml;": '\U000000EF',
+ "jcirc;": '\U00000135',
+ "jcy;": '\U00000439',
+ "jfr;": '\U0001D527',
+ "jmath;": '\U00000237',
+ "jopf;": '\U0001D55B',
+ "jscr;": '\U0001D4BF',
+ "jsercy;": '\U00000458',
+ "jukcy;": '\U00000454',
+ "kappa;": '\U000003BA',
+ "kappav;": '\U000003F0',
+ "kcedil;": '\U00000137',
+ "kcy;": '\U0000043A',
+ "kfr;": '\U0001D528',
+ "kgreen;": '\U00000138',
+ "khcy;": '\U00000445',
+ "kjcy;": '\U0000045C',
+ "kopf;": '\U0001D55C',
+ "kscr;": '\U0001D4C0',
+ "lAarr;": '\U000021DA',
+ "lArr;": '\U000021D0',
+ "lAtail;": '\U0000291B',
+ "lBarr;": '\U0000290E',
+ "lE;": '\U00002266',
+ "lEg;": '\U00002A8B',
+ "lHar;": '\U00002962',
+ "lacute;": '\U0000013A',
+ "laemptyv;": '\U000029B4',
+ "lagran;": '\U00002112',
+ "lambda;": '\U000003BB',
+ "lang;": '\U000027E8',
+ "langd;": '\U00002991',
+ "langle;": '\U000027E8',
+ "lap;": '\U00002A85',
+ "laquo;": '\U000000AB',
+ "larr;": '\U00002190',
+ "larrb;": '\U000021E4',
+ "larrbfs;": '\U0000291F',
+ "larrfs;": '\U0000291D',
+ "larrhk;": '\U000021A9',
+ "larrlp;": '\U000021AB',
+ "larrpl;": '\U00002939',
+ "larrsim;": '\U00002973',
+ "larrtl;": '\U000021A2',
+ "lat;": '\U00002AAB',
+ "latail;": '\U00002919',
+ "late;": '\U00002AAD',
+ "lbarr;": '\U0000290C',
+ "lbbrk;": '\U00002772',
+ "lbrace;": '\U0000007B',
+ "lbrack;": '\U0000005B',
+ "lbrke;": '\U0000298B',
+ "lbrksld;": '\U0000298F',
+ "lbrkslu;": '\U0000298D',
+ "lcaron;": '\U0000013E',
+ "lcedil;": '\U0000013C',
+ "lceil;": '\U00002308',
+ "lcub;": '\U0000007B',
+ "lcy;": '\U0000043B',
+ "ldca;": '\U00002936',
+ "ldquo;": '\U0000201C',
+ "ldquor;": '\U0000201E',
+ "ldrdhar;": '\U00002967',
+ "ldrushar;": '\U0000294B',
+ "ldsh;": '\U000021B2',
+ "le;": '\U00002264',
+ "leftarrow;": '\U00002190',
+ "leftarrowtail;": '\U000021A2',
+ "leftharpoondown;": '\U000021BD',
+ "leftharpoonup;": '\U000021BC',
+ "leftleftarrows;": '\U000021C7',
+ "leftrightarrow;": '\U00002194',
+ "leftrightarrows;": '\U000021C6',
+ "leftrightharpoons;": '\U000021CB',
+ "leftrightsquigarrow;": '\U000021AD',
+ "leftthreetimes;": '\U000022CB',
+ "leg;": '\U000022DA',
+ "leq;": '\U00002264',
+ "leqq;": '\U00002266',
+ "leqslant;": '\U00002A7D',
+ "les;": '\U00002A7D',
+ "lescc;": '\U00002AA8',
+ "lesdot;": '\U00002A7F',
+ "lesdoto;": '\U00002A81',
+ "lesdotor;": '\U00002A83',
+ "lesges;": '\U00002A93',
+ "lessapprox;": '\U00002A85',
+ "lessdot;": '\U000022D6',
+ "lesseqgtr;": '\U000022DA',
+ "lesseqqgtr;": '\U00002A8B',
+ "lessgtr;": '\U00002276',
+ "lesssim;": '\U00002272',
+ "lfisht;": '\U0000297C',
+ "lfloor;": '\U0000230A',
+ "lfr;": '\U0001D529',
+ "lg;": '\U00002276',
+ "lgE;": '\U00002A91',
+ "lhard;": '\U000021BD',
+ "lharu;": '\U000021BC',
+ "lharul;": '\U0000296A',
+ "lhblk;": '\U00002584',
+ "ljcy;": '\U00000459',
+ "ll;": '\U0000226A',
+ "llarr;": '\U000021C7',
+ "llcorner;": '\U0000231E',
+ "llhard;": '\U0000296B',
+ "lltri;": '\U000025FA',
+ "lmidot;": '\U00000140',
+ "lmoust;": '\U000023B0',
+ "lmoustache;": '\U000023B0',
+ "lnE;": '\U00002268',
+ "lnap;": '\U00002A89',
+ "lnapprox;": '\U00002A89',
+ "lne;": '\U00002A87',
+ "lneq;": '\U00002A87',
+ "lneqq;": '\U00002268',
+ "lnsim;": '\U000022E6',
+ "loang;": '\U000027EC',
+ "loarr;": '\U000021FD',
+ "lobrk;": '\U000027E6',
+ "longleftarrow;": '\U000027F5',
+ "longleftrightarrow;": '\U000027F7',
+ "longmapsto;": '\U000027FC',
+ "longrightarrow;": '\U000027F6',
+ "looparrowleft;": '\U000021AB',
+ "looparrowright;": '\U000021AC',
+ "lopar;": '\U00002985',
+ "lopf;": '\U0001D55D',
+ "loplus;": '\U00002A2D',
+ "lotimes;": '\U00002A34',
+ "lowast;": '\U00002217',
+ "lowbar;": '\U0000005F',
+ "loz;": '\U000025CA',
+ "lozenge;": '\U000025CA',
+ "lozf;": '\U000029EB',
+ "lpar;": '\U00000028',
+ "lparlt;": '\U00002993',
+ "lrarr;": '\U000021C6',
+ "lrcorner;": '\U0000231F',
+ "lrhar;": '\U000021CB',
+ "lrhard;": '\U0000296D',
+ "lrm;": '\U0000200E',
+ "lrtri;": '\U000022BF',
+ "lsaquo;": '\U00002039',
+ "lscr;": '\U0001D4C1',
+ "lsh;": '\U000021B0',
+ "lsim;": '\U00002272',
+ "lsime;": '\U00002A8D',
+ "lsimg;": '\U00002A8F',
+ "lsqb;": '\U0000005B',
+ "lsquo;": '\U00002018',
+ "lsquor;": '\U0000201A',
+ "lstrok;": '\U00000142',
+ "lt;": '\U0000003C',
+ "ltcc;": '\U00002AA6',
+ "ltcir;": '\U00002A79',
+ "ltdot;": '\U000022D6',
+ "lthree;": '\U000022CB',
+ "ltimes;": '\U000022C9',
+ "ltlarr;": '\U00002976',
+ "ltquest;": '\U00002A7B',
+ "ltrPar;": '\U00002996',
+ "ltri;": '\U000025C3',
+ "ltrie;": '\U000022B4',
+ "ltrif;": '\U000025C2',
+ "lurdshar;": '\U0000294A',
+ "luruhar;": '\U00002966',
+ "mDDot;": '\U0000223A',
+ "macr;": '\U000000AF',
+ "male;": '\U00002642',
+ "malt;": '\U00002720',
+ "maltese;": '\U00002720',
+ "map;": '\U000021A6',
+ "mapsto;": '\U000021A6',
+ "mapstodown;": '\U000021A7',
+ "mapstoleft;": '\U000021A4',
+ "mapstoup;": '\U000021A5',
+ "marker;": '\U000025AE',
+ "mcomma;": '\U00002A29',
+ "mcy;": '\U0000043C',
+ "mdash;": '\U00002014',
+ "measuredangle;": '\U00002221',
+ "mfr;": '\U0001D52A',
+ "mho;": '\U00002127',
+ "micro;": '\U000000B5',
+ "mid;": '\U00002223',
+ "midast;": '\U0000002A',
+ "midcir;": '\U00002AF0',
+ "middot;": '\U000000B7',
+ "minus;": '\U00002212',
+ "minusb;": '\U0000229F',
+ "minusd;": '\U00002238',
+ "minusdu;": '\U00002A2A',
+ "mlcp;": '\U00002ADB',
+ "mldr;": '\U00002026',
+ "mnplus;": '\U00002213',
+ "models;": '\U000022A7',
+ "mopf;": '\U0001D55E',
+ "mp;": '\U00002213',
+ "mscr;": '\U0001D4C2',
+ "mstpos;": '\U0000223E',
+ "mu;": '\U000003BC',
+ "multimap;": '\U000022B8',
+ "mumap;": '\U000022B8',
+ "nLeftarrow;": '\U000021CD',
+ "nLeftrightarrow;": '\U000021CE',
+ "nRightarrow;": '\U000021CF',
+ "nVDash;": '\U000022AF',
+ "nVdash;": '\U000022AE',
+ "nabla;": '\U00002207',
+ "nacute;": '\U00000144',
+ "nap;": '\U00002249',
+ "napos;": '\U00000149',
+ "napprox;": '\U00002249',
+ "natur;": '\U0000266E',
+ "natural;": '\U0000266E',
+ "naturals;": '\U00002115',
+ "nbsp;": '\U000000A0',
+ "ncap;": '\U00002A43',
+ "ncaron;": '\U00000148',
+ "ncedil;": '\U00000146',
+ "ncong;": '\U00002247',
+ "ncup;": '\U00002A42',
+ "ncy;": '\U0000043D',
+ "ndash;": '\U00002013',
+ "ne;": '\U00002260',
+ "neArr;": '\U000021D7',
+ "nearhk;": '\U00002924',
+ "nearr;": '\U00002197',
+ "nearrow;": '\U00002197',
+ "nequiv;": '\U00002262',
+ "nesear;": '\U00002928',
+ "nexist;": '\U00002204',
+ "nexists;": '\U00002204',
+ "nfr;": '\U0001D52B',
+ "nge;": '\U00002271',
+ "ngeq;": '\U00002271',
+ "ngsim;": '\U00002275',
+ "ngt;": '\U0000226F',
+ "ngtr;": '\U0000226F',
+ "nhArr;": '\U000021CE',
+ "nharr;": '\U000021AE',
+ "nhpar;": '\U00002AF2',
+ "ni;": '\U0000220B',
+ "nis;": '\U000022FC',
+ "nisd;": '\U000022FA',
+ "niv;": '\U0000220B',
+ "njcy;": '\U0000045A',
+ "nlArr;": '\U000021CD',
+ "nlarr;": '\U0000219A',
+ "nldr;": '\U00002025',
+ "nle;": '\U00002270',
+ "nleftarrow;": '\U0000219A',
+ "nleftrightarrow;": '\U000021AE',
+ "nleq;": '\U00002270',
+ "nless;": '\U0000226E',
+ "nlsim;": '\U00002274',
+ "nlt;": '\U0000226E',
+ "nltri;": '\U000022EA',
+ "nltrie;": '\U000022EC',
+ "nmid;": '\U00002224',
+ "nopf;": '\U0001D55F',
+ "not;": '\U000000AC',
+ "notin;": '\U00002209',
+ "notinva;": '\U00002209',
+ "notinvb;": '\U000022F7',
+ "notinvc;": '\U000022F6',
+ "notni;": '\U0000220C',
+ "notniva;": '\U0000220C',
+ "notnivb;": '\U000022FE',
+ "notnivc;": '\U000022FD',
+ "npar;": '\U00002226',
+ "nparallel;": '\U00002226',
+ "npolint;": '\U00002A14',
+ "npr;": '\U00002280',
+ "nprcue;": '\U000022E0',
+ "nprec;": '\U00002280',
+ "nrArr;": '\U000021CF',
+ "nrarr;": '\U0000219B',
+ "nrightarrow;": '\U0000219B',
+ "nrtri;": '\U000022EB',
+ "nrtrie;": '\U000022ED',
+ "nsc;": '\U00002281',
+ "nsccue;": '\U000022E1',
+ "nscr;": '\U0001D4C3',
+ "nshortmid;": '\U00002224',
+ "nshortparallel;": '\U00002226',
+ "nsim;": '\U00002241',
+ "nsime;": '\U00002244',
+ "nsimeq;": '\U00002244',
+ "nsmid;": '\U00002224',
+ "nspar;": '\U00002226',
+ "nsqsube;": '\U000022E2',
+ "nsqsupe;": '\U000022E3',
+ "nsub;": '\U00002284',
+ "nsube;": '\U00002288',
+ "nsubseteq;": '\U00002288',
+ "nsucc;": '\U00002281',
+ "nsup;": '\U00002285',
+ "nsupe;": '\U00002289',
+ "nsupseteq;": '\U00002289',
+ "ntgl;": '\U00002279',
+ "ntilde;": '\U000000F1',
+ "ntlg;": '\U00002278',
+ "ntriangleleft;": '\U000022EA',
+ "ntrianglelefteq;": '\U000022EC',
+ "ntriangleright;": '\U000022EB',
+ "ntrianglerighteq;": '\U000022ED',
+ "nu;": '\U000003BD',
+ "num;": '\U00000023',
+ "numero;": '\U00002116',
+ "numsp;": '\U00002007',
+ "nvDash;": '\U000022AD',
+ "nvHarr;": '\U00002904',
+ "nvdash;": '\U000022AC',
+ "nvinfin;": '\U000029DE',
+ "nvlArr;": '\U00002902',
+ "nvrArr;": '\U00002903',
+ "nwArr;": '\U000021D6',
+ "nwarhk;": '\U00002923',
+ "nwarr;": '\U00002196',
+ "nwarrow;": '\U00002196',
+ "nwnear;": '\U00002927',
+ "oS;": '\U000024C8',
+ "oacute;": '\U000000F3',
+ "oast;": '\U0000229B',
+ "ocir;": '\U0000229A',
+ "ocirc;": '\U000000F4',
+ "ocy;": '\U0000043E',
+ "odash;": '\U0000229D',
+ "odblac;": '\U00000151',
+ "odiv;": '\U00002A38',
+ "odot;": '\U00002299',
+ "odsold;": '\U000029BC',
+ "oelig;": '\U00000153',
+ "ofcir;": '\U000029BF',
+ "ofr;": '\U0001D52C',
+ "ogon;": '\U000002DB',
+ "ograve;": '\U000000F2',
+ "ogt;": '\U000029C1',
+ "ohbar;": '\U000029B5',
+ "ohm;": '\U000003A9',
+ "oint;": '\U0000222E',
+ "olarr;": '\U000021BA',
+ "olcir;": '\U000029BE',
+ "olcross;": '\U000029BB',
+ "oline;": '\U0000203E',
+ "olt;": '\U000029C0',
+ "omacr;": '\U0000014D',
+ "omega;": '\U000003C9',
+ "omicron;": '\U000003BF',
+ "omid;": '\U000029B6',
+ "ominus;": '\U00002296',
+ "oopf;": '\U0001D560',
+ "opar;": '\U000029B7',
+ "operp;": '\U000029B9',
+ "oplus;": '\U00002295',
+ "or;": '\U00002228',
+ "orarr;": '\U000021BB',
+ "ord;": '\U00002A5D',
+ "order;": '\U00002134',
+ "orderof;": '\U00002134',
+ "ordf;": '\U000000AA',
+ "ordm;": '\U000000BA',
+ "origof;": '\U000022B6',
+ "oror;": '\U00002A56',
+ "orslope;": '\U00002A57',
+ "orv;": '\U00002A5B',
+ "oscr;": '\U00002134',
+ "oslash;": '\U000000F8',
+ "osol;": '\U00002298',
+ "otilde;": '\U000000F5',
+ "otimes;": '\U00002297',
+ "otimesas;": '\U00002A36',
+ "ouml;": '\U000000F6',
+ "ovbar;": '\U0000233D',
+ "par;": '\U00002225',
+ "para;": '\U000000B6',
+ "parallel;": '\U00002225',
+ "parsim;": '\U00002AF3',
+ "parsl;": '\U00002AFD',
+ "part;": '\U00002202',
+ "pcy;": '\U0000043F',
+ "percnt;": '\U00000025',
+ "period;": '\U0000002E',
+ "permil;": '\U00002030',
+ "perp;": '\U000022A5',
+ "pertenk;": '\U00002031',
+ "pfr;": '\U0001D52D',
+ "phi;": '\U000003C6',
+ "phiv;": '\U000003D5',
+ "phmmat;": '\U00002133',
+ "phone;": '\U0000260E',
+ "pi;": '\U000003C0',
+ "pitchfork;": '\U000022D4',
+ "piv;": '\U000003D6',
+ "planck;": '\U0000210F',
+ "planckh;": '\U0000210E',
+ "plankv;": '\U0000210F',
+ "plus;": '\U0000002B',
+ "plusacir;": '\U00002A23',
+ "plusb;": '\U0000229E',
+ "pluscir;": '\U00002A22',
+ "plusdo;": '\U00002214',
+ "plusdu;": '\U00002A25',
+ "pluse;": '\U00002A72',
+ "plusmn;": '\U000000B1',
+ "plussim;": '\U00002A26',
+ "plustwo;": '\U00002A27',
+ "pm;": '\U000000B1',
+ "pointint;": '\U00002A15',
+ "popf;": '\U0001D561',
+ "pound;": '\U000000A3',
+ "pr;": '\U0000227A',
+ "prE;": '\U00002AB3',
+ "prap;": '\U00002AB7',
+ "prcue;": '\U0000227C',
+ "pre;": '\U00002AAF',
+ "prec;": '\U0000227A',
+ "precapprox;": '\U00002AB7',
+ "preccurlyeq;": '\U0000227C',
+ "preceq;": '\U00002AAF',
+ "precnapprox;": '\U00002AB9',
+ "precneqq;": '\U00002AB5',
+ "precnsim;": '\U000022E8',
+ "precsim;": '\U0000227E',
+ "prime;": '\U00002032',
+ "primes;": '\U00002119',
+ "prnE;": '\U00002AB5',
+ "prnap;": '\U00002AB9',
+ "prnsim;": '\U000022E8',
+ "prod;": '\U0000220F',
+ "profalar;": '\U0000232E',
+ "profline;": '\U00002312',
+ "profsurf;": '\U00002313',
+ "prop;": '\U0000221D',
+ "propto;": '\U0000221D',
+ "prsim;": '\U0000227E',
+ "prurel;": '\U000022B0',
+ "pscr;": '\U0001D4C5',
+ "psi;": '\U000003C8',
+ "puncsp;": '\U00002008',
+ "qfr;": '\U0001D52E',
+ "qint;": '\U00002A0C',
+ "qopf;": '\U0001D562',
+ "qprime;": '\U00002057',
+ "qscr;": '\U0001D4C6',
+ "quaternions;": '\U0000210D',
+ "quatint;": '\U00002A16',
+ "quest;": '\U0000003F',
+ "questeq;": '\U0000225F',
+ "quot;": '\U00000022',
+ "rAarr;": '\U000021DB',
+ "rArr;": '\U000021D2',
+ "rAtail;": '\U0000291C',
+ "rBarr;": '\U0000290F',
+ "rHar;": '\U00002964',
+ "racute;": '\U00000155',
+ "radic;": '\U0000221A',
+ "raemptyv;": '\U000029B3',
+ "rang;": '\U000027E9',
+ "rangd;": '\U00002992',
+ "range;": '\U000029A5',
+ "rangle;": '\U000027E9',
+ "raquo;": '\U000000BB',
+ "rarr;": '\U00002192',
+ "rarrap;": '\U00002975',
+ "rarrb;": '\U000021E5',
+ "rarrbfs;": '\U00002920',
+ "rarrc;": '\U00002933',
+ "rarrfs;": '\U0000291E',
+ "rarrhk;": '\U000021AA',
+ "rarrlp;": '\U000021AC',
+ "rarrpl;": '\U00002945',
+ "rarrsim;": '\U00002974',
+ "rarrtl;": '\U000021A3',
+ "rarrw;": '\U0000219D',
+ "ratail;": '\U0000291A',
+ "ratio;": '\U00002236',
+ "rationals;": '\U0000211A',
+ "rbarr;": '\U0000290D',
+ "rbbrk;": '\U00002773',
+ "rbrace;": '\U0000007D',
+ "rbrack;": '\U0000005D',
+ "rbrke;": '\U0000298C',
+ "rbrksld;": '\U0000298E',
+ "rbrkslu;": '\U00002990',
+ "rcaron;": '\U00000159',
+ "rcedil;": '\U00000157',
+ "rceil;": '\U00002309',
+ "rcub;": '\U0000007D',
+ "rcy;": '\U00000440',
+ "rdca;": '\U00002937',
+ "rdldhar;": '\U00002969',
+ "rdquo;": '\U0000201D',
+ "rdquor;": '\U0000201D',
+ "rdsh;": '\U000021B3',
+ "real;": '\U0000211C',
+ "realine;": '\U0000211B',
+ "realpart;": '\U0000211C',
+ "reals;": '\U0000211D',
+ "rect;": '\U000025AD',
+ "reg;": '\U000000AE',
+ "rfisht;": '\U0000297D',
+ "rfloor;": '\U0000230B',
+ "rfr;": '\U0001D52F',
+ "rhard;": '\U000021C1',
+ "rharu;": '\U000021C0',
+ "rharul;": '\U0000296C',
+ "rho;": '\U000003C1',
+ "rhov;": '\U000003F1',
+ "rightarrow;": '\U00002192',
+ "rightarrowtail;": '\U000021A3',
+ "rightharpoondown;": '\U000021C1',
+ "rightharpoonup;": '\U000021C0',
+ "rightleftarrows;": '\U000021C4',
+ "rightleftharpoons;": '\U000021CC',
+ "rightrightarrows;": '\U000021C9',
+ "rightsquigarrow;": '\U0000219D',
+ "rightthreetimes;": '\U000022CC',
+ "ring;": '\U000002DA',
+ "risingdotseq;": '\U00002253',
+ "rlarr;": '\U000021C4',
+ "rlhar;": '\U000021CC',
+ "rlm;": '\U0000200F',
+ "rmoust;": '\U000023B1',
+ "rmoustache;": '\U000023B1',
+ "rnmid;": '\U00002AEE',
+ "roang;": '\U000027ED',
+ "roarr;": '\U000021FE',
+ "robrk;": '\U000027E7',
+ "ropar;": '\U00002986',
+ "ropf;": '\U0001D563',
+ "roplus;": '\U00002A2E',
+ "rotimes;": '\U00002A35',
+ "rpar;": '\U00000029',
+ "rpargt;": '\U00002994',
+ "rppolint;": '\U00002A12',
+ "rrarr;": '\U000021C9',
+ "rsaquo;": '\U0000203A',
+ "rscr;": '\U0001D4C7',
+ "rsh;": '\U000021B1',
+ "rsqb;": '\U0000005D',
+ "rsquo;": '\U00002019',
+ "rsquor;": '\U00002019',
+ "rthree;": '\U000022CC',
+ "rtimes;": '\U000022CA',
+ "rtri;": '\U000025B9',
+ "rtrie;": '\U000022B5',
+ "rtrif;": '\U000025B8',
+ "rtriltri;": '\U000029CE',
+ "ruluhar;": '\U00002968',
+ "rx;": '\U0000211E',
+ "sacute;": '\U0000015B',
+ "sbquo;": '\U0000201A',
+ "sc;": '\U0000227B',
+ "scE;": '\U00002AB4',
+ "scap;": '\U00002AB8',
+ "scaron;": '\U00000161',
+ "sccue;": '\U0000227D',
+ "sce;": '\U00002AB0',
+ "scedil;": '\U0000015F',
+ "scirc;": '\U0000015D',
+ "scnE;": '\U00002AB6',
+ "scnap;": '\U00002ABA',
+ "scnsim;": '\U000022E9',
+ "scpolint;": '\U00002A13',
+ "scsim;": '\U0000227F',
+ "scy;": '\U00000441',
+ "sdot;": '\U000022C5',
+ "sdotb;": '\U000022A1',
+ "sdote;": '\U00002A66',
+ "seArr;": '\U000021D8',
+ "searhk;": '\U00002925',
+ "searr;": '\U00002198',
+ "searrow;": '\U00002198',
+ "sect;": '\U000000A7',
+ "semi;": '\U0000003B',
+ "seswar;": '\U00002929',
+ "setminus;": '\U00002216',
+ "setmn;": '\U00002216',
+ "sext;": '\U00002736',
+ "sfr;": '\U0001D530',
+ "sfrown;": '\U00002322',
+ "sharp;": '\U0000266F',
+ "shchcy;": '\U00000449',
+ "shcy;": '\U00000448',
+ "shortmid;": '\U00002223',
+ "shortparallel;": '\U00002225',
+ "shy;": '\U000000AD',
+ "sigma;": '\U000003C3',
+ "sigmaf;": '\U000003C2',
+ "sigmav;": '\U000003C2',
+ "sim;": '\U0000223C',
+ "simdot;": '\U00002A6A',
+ "sime;": '\U00002243',
+ "simeq;": '\U00002243',
+ "simg;": '\U00002A9E',
+ "simgE;": '\U00002AA0',
+ "siml;": '\U00002A9D',
+ "simlE;": '\U00002A9F',
+ "simne;": '\U00002246',
+ "simplus;": '\U00002A24',
+ "simrarr;": '\U00002972',
+ "slarr;": '\U00002190',
+ "smallsetminus;": '\U00002216',
+ "smashp;": '\U00002A33',
+ "smeparsl;": '\U000029E4',
+ "smid;": '\U00002223',
+ "smile;": '\U00002323',
+ "smt;": '\U00002AAA',
+ "smte;": '\U00002AAC',
+ "softcy;": '\U0000044C',
+ "sol;": '\U0000002F',
+ "solb;": '\U000029C4',
+ "solbar;": '\U0000233F',
+ "sopf;": '\U0001D564',
+ "spades;": '\U00002660',
+ "spadesuit;": '\U00002660',
+ "spar;": '\U00002225',
+ "sqcap;": '\U00002293',
+ "sqcup;": '\U00002294',
+ "sqsub;": '\U0000228F',
+ "sqsube;": '\U00002291',
+ "sqsubset;": '\U0000228F',
+ "sqsubseteq;": '\U00002291',
+ "sqsup;": '\U00002290',
+ "sqsupe;": '\U00002292',
+ "sqsupset;": '\U00002290',
+ "sqsupseteq;": '\U00002292',
+ "squ;": '\U000025A1',
+ "square;": '\U000025A1',
+ "squarf;": '\U000025AA',
+ "squf;": '\U000025AA',
+ "srarr;": '\U00002192',
+ "sscr;": '\U0001D4C8',
+ "ssetmn;": '\U00002216',
+ "ssmile;": '\U00002323',
+ "sstarf;": '\U000022C6',
+ "star;": '\U00002606',
+ "starf;": '\U00002605',
+ "straightepsilon;": '\U000003F5',
+ "straightphi;": '\U000003D5',
+ "strns;": '\U000000AF',
+ "sub;": '\U00002282',
+ "subE;": '\U00002AC5',
+ "subdot;": '\U00002ABD',
+ "sube;": '\U00002286',
+ "subedot;": '\U00002AC3',
+ "submult;": '\U00002AC1',
+ "subnE;": '\U00002ACB',
+ "subne;": '\U0000228A',
+ "subplus;": '\U00002ABF',
+ "subrarr;": '\U00002979',
+ "subset;": '\U00002282',
+ "subseteq;": '\U00002286',
+ "subseteqq;": '\U00002AC5',
+ "subsetneq;": '\U0000228A',
+ "subsetneqq;": '\U00002ACB',
+ "subsim;": '\U00002AC7',
+ "subsub;": '\U00002AD5',
+ "subsup;": '\U00002AD3',
+ "succ;": '\U0000227B',
+ "succapprox;": '\U00002AB8',
+ "succcurlyeq;": '\U0000227D',
+ "succeq;": '\U00002AB0',
+ "succnapprox;": '\U00002ABA',
+ "succneqq;": '\U00002AB6',
+ "succnsim;": '\U000022E9',
+ "succsim;": '\U0000227F',
+ "sum;": '\U00002211',
+ "sung;": '\U0000266A',
+ "sup;": '\U00002283',
+ "sup1;": '\U000000B9',
+ "sup2;": '\U000000B2',
+ "sup3;": '\U000000B3',
+ "supE;": '\U00002AC6',
+ "supdot;": '\U00002ABE',
+ "supdsub;": '\U00002AD8',
+ "supe;": '\U00002287',
+ "supedot;": '\U00002AC4',
+ "suphsol;": '\U000027C9',
+ "suphsub;": '\U00002AD7',
+ "suplarr;": '\U0000297B',
+ "supmult;": '\U00002AC2',
+ "supnE;": '\U00002ACC',
+ "supne;": '\U0000228B',
+ "supplus;": '\U00002AC0',
+ "supset;": '\U00002283',
+ "supseteq;": '\U00002287',
+ "supseteqq;": '\U00002AC6',
+ "supsetneq;": '\U0000228B',
+ "supsetneqq;": '\U00002ACC',
+ "supsim;": '\U00002AC8',
+ "supsub;": '\U00002AD4',
+ "supsup;": '\U00002AD6',
+ "swArr;": '\U000021D9',
+ "swarhk;": '\U00002926',
+ "swarr;": '\U00002199',
+ "swarrow;": '\U00002199',
+ "swnwar;": '\U0000292A',
+ "szlig;": '\U000000DF',
+ "target;": '\U00002316',
+ "tau;": '\U000003C4',
+ "tbrk;": '\U000023B4',
+ "tcaron;": '\U00000165',
+ "tcedil;": '\U00000163',
+ "tcy;": '\U00000442',
+ "tdot;": '\U000020DB',
+ "telrec;": '\U00002315',
+ "tfr;": '\U0001D531',
+ "there4;": '\U00002234',
+ "therefore;": '\U00002234',
+ "theta;": '\U000003B8',
+ "thetasym;": '\U000003D1',
+ "thetav;": '\U000003D1',
+ "thickapprox;": '\U00002248',
+ "thicksim;": '\U0000223C',
+ "thinsp;": '\U00002009',
+ "thkap;": '\U00002248',
+ "thksim;": '\U0000223C',
+ "thorn;": '\U000000FE',
+ "tilde;": '\U000002DC',
+ "times;": '\U000000D7',
+ "timesb;": '\U000022A0',
+ "timesbar;": '\U00002A31',
+ "timesd;": '\U00002A30',
+ "tint;": '\U0000222D',
+ "toea;": '\U00002928',
+ "top;": '\U000022A4',
+ "topbot;": '\U00002336',
+ "topcir;": '\U00002AF1',
+ "topf;": '\U0001D565',
+ "topfork;": '\U00002ADA',
+ "tosa;": '\U00002929',
+ "tprime;": '\U00002034',
+ "trade;": '\U00002122',
+ "triangle;": '\U000025B5',
+ "triangledown;": '\U000025BF',
+ "triangleleft;": '\U000025C3',
+ "trianglelefteq;": '\U000022B4',
+ "triangleq;": '\U0000225C',
+ "triangleright;": '\U000025B9',
+ "trianglerighteq;": '\U000022B5',
+ "tridot;": '\U000025EC',
+ "trie;": '\U0000225C',
+ "triminus;": '\U00002A3A',
+ "triplus;": '\U00002A39',
+ "trisb;": '\U000029CD',
+ "tritime;": '\U00002A3B',
+ "trpezium;": '\U000023E2',
+ "tscr;": '\U0001D4C9',
+ "tscy;": '\U00000446',
+ "tshcy;": '\U0000045B',
+ "tstrok;": '\U00000167',
+ "twixt;": '\U0000226C',
+ "twoheadleftarrow;": '\U0000219E',
+ "twoheadrightarrow;": '\U000021A0',
+ "uArr;": '\U000021D1',
+ "uHar;": '\U00002963',
+ "uacute;": '\U000000FA',
+ "uarr;": '\U00002191',
+ "ubrcy;": '\U0000045E',
+ "ubreve;": '\U0000016D',
+ "ucirc;": '\U000000FB',
+ "ucy;": '\U00000443',
+ "udarr;": '\U000021C5',
+ "udblac;": '\U00000171',
+ "udhar;": '\U0000296E',
+ "ufisht;": '\U0000297E',
+ "ufr;": '\U0001D532',
+ "ugrave;": '\U000000F9',
+ "uharl;": '\U000021BF',
+ "uharr;": '\U000021BE',
+ "uhblk;": '\U00002580',
+ "ulcorn;": '\U0000231C',
+ "ulcorner;": '\U0000231C',
+ "ulcrop;": '\U0000230F',
+ "ultri;": '\U000025F8',
+ "umacr;": '\U0000016B',
+ "uml;": '\U000000A8',
+ "uogon;": '\U00000173',
+ "uopf;": '\U0001D566',
+ "uparrow;": '\U00002191',
+ "updownarrow;": '\U00002195',
+ "upharpoonleft;": '\U000021BF',
+ "upharpoonright;": '\U000021BE',
+ "uplus;": '\U0000228E',
+ "upsi;": '\U000003C5',
+ "upsih;": '\U000003D2',
+ "upsilon;": '\U000003C5',
+ "upuparrows;": '\U000021C8',
+ "urcorn;": '\U0000231D',
+ "urcorner;": '\U0000231D',
+ "urcrop;": '\U0000230E',
+ "uring;": '\U0000016F',
+ "urtri;": '\U000025F9',
+ "uscr;": '\U0001D4CA',
+ "utdot;": '\U000022F0',
+ "utilde;": '\U00000169',
+ "utri;": '\U000025B5',
+ "utrif;": '\U000025B4',
+ "uuarr;": '\U000021C8',
+ "uuml;": '\U000000FC',
+ "uwangle;": '\U000029A7',
+ "vArr;": '\U000021D5',
+ "vBar;": '\U00002AE8',
+ "vBarv;": '\U00002AE9',
+ "vDash;": '\U000022A8',
+ "vangrt;": '\U0000299C',
+ "varepsilon;": '\U000003F5',
+ "varkappa;": '\U000003F0',
+ "varnothing;": '\U00002205',
+ "varphi;": '\U000003D5',
+ "varpi;": '\U000003D6',
+ "varpropto;": '\U0000221D',
+ "varr;": '\U00002195',
+ "varrho;": '\U000003F1',
+ "varsigma;": '\U000003C2',
+ "vartheta;": '\U000003D1',
+ "vartriangleleft;": '\U000022B2',
+ "vartriangleright;": '\U000022B3',
+ "vcy;": '\U00000432',
+ "vdash;": '\U000022A2',
+ "vee;": '\U00002228',
+ "veebar;": '\U000022BB',
+ "veeeq;": '\U0000225A',
+ "vellip;": '\U000022EE',
+ "verbar;": '\U0000007C',
+ "vert;": '\U0000007C',
+ "vfr;": '\U0001D533',
+ "vltri;": '\U000022B2',
+ "vopf;": '\U0001D567',
+ "vprop;": '\U0000221D',
+ "vrtri;": '\U000022B3',
+ "vscr;": '\U0001D4CB',
+ "vzigzag;": '\U0000299A',
+ "wcirc;": '\U00000175',
+ "wedbar;": '\U00002A5F',
+ "wedge;": '\U00002227',
+ "wedgeq;": '\U00002259',
+ "weierp;": '\U00002118',
+ "wfr;": '\U0001D534',
+ "wopf;": '\U0001D568',
+ "wp;": '\U00002118',
+ "wr;": '\U00002240',
+ "wreath;": '\U00002240',
+ "wscr;": '\U0001D4CC',
+ "xcap;": '\U000022C2',
+ "xcirc;": '\U000025EF',
+ "xcup;": '\U000022C3',
+ "xdtri;": '\U000025BD',
+ "xfr;": '\U0001D535',
+ "xhArr;": '\U000027FA',
+ "xharr;": '\U000027F7',
+ "xi;": '\U000003BE',
+ "xlArr;": '\U000027F8',
+ "xlarr;": '\U000027F5',
+ "xmap;": '\U000027FC',
+ "xnis;": '\U000022FB',
+ "xodot;": '\U00002A00',
+ "xopf;": '\U0001D569',
+ "xoplus;": '\U00002A01',
+ "xotime;": '\U00002A02',
+ "xrArr;": '\U000027F9',
+ "xrarr;": '\U000027F6',
+ "xscr;": '\U0001D4CD',
+ "xsqcup;": '\U00002A06',
+ "xuplus;": '\U00002A04',
+ "xutri;": '\U000025B3',
+ "xvee;": '\U000022C1',
+ "xwedge;": '\U000022C0',
+ "yacute;": '\U000000FD',
+ "yacy;": '\U0000044F',
+ "ycirc;": '\U00000177',
+ "ycy;": '\U0000044B',
+ "yen;": '\U000000A5',
+ "yfr;": '\U0001D536',
+ "yicy;": '\U00000457',
+ "yopf;": '\U0001D56A',
+ "yscr;": '\U0001D4CE',
+ "yucy;": '\U0000044E',
+ "yuml;": '\U000000FF',
+ "zacute;": '\U0000017A',
+ "zcaron;": '\U0000017E',
+ "zcy;": '\U00000437',
+ "zdot;": '\U0000017C',
+ "zeetrf;": '\U00002128',
+ "zeta;": '\U000003B6',
+ "zfr;": '\U0001D537',
+ "zhcy;": '\U00000436',
+ "zigrarr;": '\U000021DD',
+ "zopf;": '\U0001D56B',
+ "zscr;": '\U0001D4CF',
+ "zwj;": '\U0000200D',
+ "zwnj;": '\U0000200C',
+ "AElig": '\U000000C6',
+ "AMP": '\U00000026',
+ "Aacute": '\U000000C1',
+ "Acirc": '\U000000C2',
+ "Agrave": '\U000000C0',
+ "Aring": '\U000000C5',
+ "Atilde": '\U000000C3',
+ "Auml": '\U000000C4',
+ "COPY": '\U000000A9',
+ "Ccedil": '\U000000C7',
+ "ETH": '\U000000D0',
+ "Eacute": '\U000000C9',
+ "Ecirc": '\U000000CA',
+ "Egrave": '\U000000C8',
+ "Euml": '\U000000CB',
+ "GT": '\U0000003E',
+ "Iacute": '\U000000CD',
+ "Icirc": '\U000000CE',
+ "Igrave": '\U000000CC',
+ "Iuml": '\U000000CF',
+ "LT": '\U0000003C',
+ "Ntilde": '\U000000D1',
+ "Oacute": '\U000000D3',
+ "Ocirc": '\U000000D4',
+ "Ograve": '\U000000D2',
+ "Oslash": '\U000000D8',
+ "Otilde": '\U000000D5',
+ "Ouml": '\U000000D6',
+ "QUOT": '\U00000022',
+ "REG": '\U000000AE',
+ "THORN": '\U000000DE',
+ "Uacute": '\U000000DA',
+ "Ucirc": '\U000000DB',
+ "Ugrave": '\U000000D9',
+ "Uuml": '\U000000DC',
+ "Yacute": '\U000000DD',
+ "aacute": '\U000000E1',
+ "acirc": '\U000000E2',
+ "acute": '\U000000B4',
+ "aelig": '\U000000E6',
+ "agrave": '\U000000E0',
+ "amp": '\U00000026',
+ "aring": '\U000000E5',
+ "atilde": '\U000000E3',
+ "auml": '\U000000E4',
+ "brvbar": '\U000000A6',
+ "ccedil": '\U000000E7',
+ "cedil": '\U000000B8',
+ "cent": '\U000000A2',
+ "copy": '\U000000A9',
+ "curren": '\U000000A4',
+ "deg": '\U000000B0',
+ "divide": '\U000000F7',
+ "eacute": '\U000000E9',
+ "ecirc": '\U000000EA',
+ "egrave": '\U000000E8',
+ "eth": '\U000000F0',
+ "euml": '\U000000EB',
+ "frac12": '\U000000BD',
+ "frac14": '\U000000BC',
+ "frac34": '\U000000BE',
+ "gt": '\U0000003E',
+ "iacute": '\U000000ED',
+ "icirc": '\U000000EE',
+ "iexcl": '\U000000A1',
+ "igrave": '\U000000EC',
+ "iquest": '\U000000BF',
+ "iuml": '\U000000EF',
+ "laquo": '\U000000AB',
+ "lt": '\U0000003C',
+ "macr": '\U000000AF',
+ "micro": '\U000000B5',
+ "middot": '\U000000B7',
+ "nbsp": '\U000000A0',
+ "not": '\U000000AC',
+ "ntilde": '\U000000F1',
+ "oacute": '\U000000F3',
+ "ocirc": '\U000000F4',
+ "ograve": '\U000000F2',
+ "ordf": '\U000000AA',
+ "ordm": '\U000000BA',
+ "oslash": '\U000000F8',
+ "otilde": '\U000000F5',
+ "ouml": '\U000000F6',
+ "para": '\U000000B6',
+ "plusmn": '\U000000B1',
+ "pound": '\U000000A3',
+ "quot": '\U00000022',
+ "raquo": '\U000000BB',
+ "reg": '\U000000AE',
+ "sect": '\U000000A7',
+ "shy": '\U000000AD',
+ "sup1": '\U000000B9',
+ "sup2": '\U000000B2',
+ "sup3": '\U000000B3',
+ "szlig": '\U000000DF',
+ "thorn": '\U000000FE',
+ "times": '\U000000D7',
+ "uacute": '\U000000FA',
+ "ucirc": '\U000000FB',
+ "ugrave": '\U000000F9',
+ "uml": '\U000000A8',
+ "uuml": '\U000000FC',
+ "yacute": '\U000000FD',
+ "yen": '\U000000A5',
+ "yuml": '\U000000FF',
+}
+
+// HTML entities that are two Unicode codepoints.
+var entity2 = map[string][2]rune{
+ // TODO(nigeltao): Handle replacements that are wider than their names.
+ // "nLt;": {'\u226A', '\u20D2'},
+ // "nGt;": {'\u226B', '\u20D2'},
+ "NotEqualTilde;": {'\u2242', '\u0338'},
+ "NotGreaterFullEqual;": {'\u2267', '\u0338'},
+ "NotGreaterGreater;": {'\u226B', '\u0338'},
+ "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
+ "NotHumpDownHump;": {'\u224E', '\u0338'},
+ "NotHumpEqual;": {'\u224F', '\u0338'},
+ "NotLeftTriangleBar;": {'\u29CF', '\u0338'},
+ "NotLessLess;": {'\u226A', '\u0338'},
+ "NotLessSlantEqual;": {'\u2A7D', '\u0338'},
+ "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
+ "NotNestedLessLess;": {'\u2AA1', '\u0338'},
+ "NotPrecedesEqual;": {'\u2AAF', '\u0338'},
+ "NotRightTriangleBar;": {'\u29D0', '\u0338'},
+ "NotSquareSubset;": {'\u228F', '\u0338'},
+ "NotSquareSuperset;": {'\u2290', '\u0338'},
+ "NotSubset;": {'\u2282', '\u20D2'},
+ "NotSucceedsEqual;": {'\u2AB0', '\u0338'},
+ "NotSucceedsTilde;": {'\u227F', '\u0338'},
+ "NotSuperset;": {'\u2283', '\u20D2'},
+ "ThickSpace;": {'\u205F', '\u200A'},
+ "acE;": {'\u223E', '\u0333'},
+ "bne;": {'\u003D', '\u20E5'},
+ "bnequiv;": {'\u2261', '\u20E5'},
+ "caps;": {'\u2229', '\uFE00'},
+ "cups;": {'\u222A', '\uFE00'},
+ "fjlig;": {'\u0066', '\u006A'},
+ "gesl;": {'\u22DB', '\uFE00'},
+ "gvertneqq;": {'\u2269', '\uFE00'},
+ "gvnE;": {'\u2269', '\uFE00'},
+ "lates;": {'\u2AAD', '\uFE00'},
+ "lesg;": {'\u22DA', '\uFE00'},
+ "lvertneqq;": {'\u2268', '\uFE00'},
+ "lvnE;": {'\u2268', '\uFE00'},
+ "nGg;": {'\u22D9', '\u0338'},
+ "nGtv;": {'\u226B', '\u0338'},
+ "nLl;": {'\u22D8', '\u0338'},
+ "nLtv;": {'\u226A', '\u0338'},
+ "nang;": {'\u2220', '\u20D2'},
+ "napE;": {'\u2A70', '\u0338'},
+ "napid;": {'\u224B', '\u0338'},
+ "nbump;": {'\u224E', '\u0338'},
+ "nbumpe;": {'\u224F', '\u0338'},
+ "ncongdot;": {'\u2A6D', '\u0338'},
+ "nedot;": {'\u2250', '\u0338'},
+ "nesim;": {'\u2242', '\u0338'},
+ "ngE;": {'\u2267', '\u0338'},
+ "ngeqq;": {'\u2267', '\u0338'},
+ "ngeqslant;": {'\u2A7E', '\u0338'},
+ "nges;": {'\u2A7E', '\u0338'},
+ "nlE;": {'\u2266', '\u0338'},
+ "nleqq;": {'\u2266', '\u0338'},
+ "nleqslant;": {'\u2A7D', '\u0338'},
+ "nles;": {'\u2A7D', '\u0338'},
+ "notinE;": {'\u22F9', '\u0338'},
+ "notindot;": {'\u22F5', '\u0338'},
+ "nparsl;": {'\u2AFD', '\u20E5'},
+ "npart;": {'\u2202', '\u0338'},
+ "npre;": {'\u2AAF', '\u0338'},
+ "npreceq;": {'\u2AAF', '\u0338'},
+ "nrarrc;": {'\u2933', '\u0338'},
+ "nrarrw;": {'\u219D', '\u0338'},
+ "nsce;": {'\u2AB0', '\u0338'},
+ "nsubE;": {'\u2AC5', '\u0338'},
+ "nsubset;": {'\u2282', '\u20D2'},
+ "nsubseteqq;": {'\u2AC5', '\u0338'},
+ "nsucceq;": {'\u2AB0', '\u0338'},
+ "nsupE;": {'\u2AC6', '\u0338'},
+ "nsupset;": {'\u2283', '\u20D2'},
+ "nsupseteqq;": {'\u2AC6', '\u0338'},
+ "nvap;": {'\u224D', '\u20D2'},
+ "nvge;": {'\u2265', '\u20D2'},
+ "nvgt;": {'\u003E', '\u20D2'},
+ "nvle;": {'\u2264', '\u20D2'},
+ "nvlt;": {'\u003C', '\u20D2'},
+ "nvltrie;": {'\u22B4', '\u20D2'},
+ "nvrtrie;": {'\u22B5', '\u20D2'},
+ "nvsim;": {'\u223C', '\u20D2'},
+ "race;": {'\u223D', '\u0331'},
+ "smtes;": {'\u2AAC', '\uFE00'},
+ "sqcaps;": {'\u2293', '\uFE00'},
+ "sqcups;": {'\u2294', '\uFE00'},
+ "varsubsetneq;": {'\u228A', '\uFE00'},
+ "varsubsetneqq;": {'\u2ACB', '\uFE00'},
+ "varsupsetneq;": {'\u228B', '\uFE00'},
+ "varsupsetneqq;": {'\u2ACC', '\uFE00'},
+ "vnsub;": {'\u2282', '\u20D2'},
+ "vnsup;": {'\u2283', '\u20D2'},
+ "vsubnE;": {'\u2ACB', '\uFE00'},
+ "vsubne;": {'\u228A', '\uFE00'},
+ "vsupnE;": {'\u2ACC', '\uFE00'},
+ "vsupne;": {'\u228B', '\uFE00'},
+}
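+
+// For example, "&NotSubset;" expands to the two-rune sequence
+// {'\u2282', '\u20D2'} (SUBSET OF followed by a combining vertical stroke),
+// which is why entity2 maps to a [2]rune value rather than a single rune.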
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
new file mode 100644
index 000000000..d85613962
--- /dev/null
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -0,0 +1,258 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "strings"
+ "unicode/utf8"
+)
+
+// These replacements permit compatibility with old numeric entities that
+// assumed Windows-1252 encoding.
+// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+var replacementTable = [...]rune{
+ '\u20AC', // First entry is what 0x80 should be replaced with.
+ '\u0081',
+ '\u201A',
+ '\u0192',
+ '\u201E',
+ '\u2026',
+ '\u2020',
+ '\u2021',
+ '\u02C6',
+ '\u2030',
+ '\u0160',
+ '\u2039',
+ '\u0152',
+ '\u008D',
+ '\u017D',
+ '\u008F',
+ '\u0090',
+ '\u2018',
+ '\u2019',
+ '\u201C',
+ '\u201D',
+ '\u2022',
+ '\u2013',
+ '\u2014',
+ '\u02DC',
+ '\u2122',
+ '\u0161',
+ '\u203A',
+ '\u0153',
+ '\u009D',
+ '\u017E',
+ '\u0178', // Last entry is 0x9F.
+ // 0x00->'\uFFFD' is handled programmatically.
+ // 0x0D->'\u000D' is a no-op.
+}
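+
+// For example, the numeric reference "&#x80;" decodes to '\u20AC' ('€') via
+// the first entry above, rather than to the C1 control character U+0080.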
+
+// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
+// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
+// Precondition: b[src] == '&' && dst <= src.
+// attribute should be true if parsing an attribute value.
+func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
+ // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
+
+ // i starts at 1 because we already know that s[0] == '&'.
+ i, s := 1, b[src:]
+
+ if len(s) <= 1 {
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if s[i] == '#' {
+ if len(s) <= 3 { // We need to have at least "&#.".
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+ i++
+ c := s[i]
+ hex := false
+ if c == 'x' || c == 'X' {
+ hex = true
+ i++
+ }
+
+ x := '\x00'
+ for i < len(s) {
+ c = s[i]
+ i++
+ if hex {
+ if '0' <= c && c <= '9' {
+ x = 16*x + rune(c) - '0'
+ continue
+ } else if 'a' <= c && c <= 'f' {
+ x = 16*x + rune(c) - 'a' + 10
+ continue
+ } else if 'A' <= c && c <= 'F' {
+ x = 16*x + rune(c) - 'A' + 10
+ continue
+ }
+ } else if '0' <= c && c <= '9' {
+ x = 10*x + rune(c) - '0'
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ if i <= 3 { // No characters matched.
+ b[dst] = b[src]
+ return dst + 1, src + 1
+ }
+
+ if 0x80 <= x && x <= 0x9F {
+ // Replace characters from Windows-1252 with UTF-8 equivalents.
+ x = replacementTable[x-0x80]
+ } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
+ // Replace invalid characters with the replacement character.
+ x = '\uFFFD'
+ }
+
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ }
+
+ // Consume the maximum number of characters possible, with the
+ // consumed characters matching one of the named references.
+
+ for i < len(s) {
+ c := s[i]
+ i++
+ // Lower-cased characters are more common in entities, so we check for them first.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ continue
+ }
+ if c != ';' {
+ i--
+ }
+ break
+ }
+
+ entityName := string(s[1:i])
+ if entityName == "" {
+ // No-op.
+ } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
+ // No-op.
+ } else if x := entity[entityName]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + i
+ } else if x := entity2[entityName]; x[0] != 0 {
+ dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+ return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+ } else if !attribute {
+ maxLen := len(entityName) - 1
+ if maxLen > longestEntityWithoutSemicolon {
+ maxLen = longestEntityWithoutSemicolon
+ }
+ for j := maxLen; j > 1; j-- {
+ if x := entity[entityName[:j]]; x != 0 {
+ return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+ }
+ }
+ }
+
+ dst1, src1 = dst+i, src+i
+ copy(b[dst:dst1], b[src:src1])
+ return dst1, src1
+}
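+
+// For example, in body text "&ltx" unescapes to "<x" through the
+// longest-prefix fallback above ("lt" is a valid entity even without its
+// trailing semicolon), while in an attribute value "&ltx=1" is left
+// untouched, so query strings in URL-valued attributes survive unescaping.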
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+ for i, c := range b {
+ if c == '&' {
+ dst, src := unescapeEntity(b, i, i, attribute)
+ for src < len(b) {
+ c := b[src]
+ if c == '&' {
+ dst, src = unescapeEntity(b, dst, src, attribute)
+ } else {
+ b[dst] = c
+ dst, src = dst+1, src+1
+ }
+ }
+ return b[0:dst]
+ }
+ }
+ return b
+}
+
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+ for i, c := range b {
+ if 'A' <= c && c <= 'Z' {
+ b[i] = c + 'a' - 'A'
+ }
+ }
+ return b
+}
+
+const escapedChars = "&'<>\"\r"
+
+func escape(w writer, s string) error {
+ i := strings.IndexAny(s, escapedChars)
+ for i != -1 {
+ if _, err := w.WriteString(s[:i]); err != nil {
+ return err
+ }
+ var esc string
+ switch s[i] {
+ case '&':
+ esc = "&amp;"
+ case '\'':
+ // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+ esc = "&#39;"
+ case '<':
+ esc = "&lt;"
+ case '>':
+ esc = "&gt;"
+ case '"':
+ // "&#34;" is shorter than "&quot;".
+ esc = "&#34;"
+ case '\r':
+ esc = "&#13;"
+ default:
+ panic("unrecognized escape character")
+ }
+ s = s[i+1:]
+ if _, err := w.WriteString(esc); err != nil {
+ return err
+ }
+ i = strings.IndexAny(s, escapedChars)
+ }
+ _, err := w.WriteString(s)
+ return err
+}
+
+// EscapeString escapes special characters like "<" to become "&lt;". It
+// escapes only six such characters: <, >, &, ', " and '\r'.
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func EscapeString(s string) string {
+ if strings.IndexAny(s, escapedChars) == -1 {
+ return s
+ }
+ var buf bytes.Buffer
+ escape(&buf, s)
+ return buf.String()
+}
+
+// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
+// larger range of entities than EscapeString escapes. For example, "&aacute;"
+// unescapes to "á", as do "&#225;" and "&#xE1;".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func UnescapeString(s string) string {
+ for _, c := range s {
+ if c == '&' {
+ return string(unescape([]byte(s), false))
+ }
+ }
+ return s
+}
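+
+// A minimal round trip using the two helpers above:
+//
+//	s := EscapeString(`a<b & "c"`) // yields "a&lt;b &amp; &#34;c&#34;"
+//	t := UnescapeString(s)         // yields `a<b & "c"` again
+//
+// The reverse order is not guaranteed to round-trip, since UnescapeString
+// accepts many entities (such as "&aacute;") that EscapeString never emits.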
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
new file mode 100644
index 000000000..d3b384409
--- /dev/null
+++ b/vendor/golang.org/x/net/html/foreign.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "strings"
+)
+
+func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
+ for i := range aa {
+ if newName, ok := nameMap[aa[i].Key]; ok {
+ aa[i].Key = newName
+ }
+ }
+}
+
+func adjustForeignAttributes(aa []Attribute) {
+ for i, a := range aa {
+ if a.Key == "" || a.Key[0] != 'x' {
+ continue
+ }
+ switch a.Key {
+ case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
+ "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
+ j := strings.Index(a.Key, ":")
+ aa[i].Namespace = a.Key[:j]
+ aa[i].Key = a.Key[j+1:]
+ }
+ }
+}
+
+func htmlIntegrationPoint(n *Node) bool {
+ if n.Type != ElementNode {
+ return false
+ }
+ switch n.Namespace {
+ case "math":
+ if n.Data == "annotation-xml" {
+ for _, a := range n.Attr {
+ if a.Key == "encoding" {
+ val := strings.ToLower(a.Val)
+ if val == "text/html" || val == "application/xhtml+xml" {
+ return true
+ }
+ }
+ }
+ }
+ case "svg":
+ switch n.Data {
+ case "desc", "foreignObject", "title":
+ return true
+ }
+ }
+ return false
+}
+
+func mathMLTextIntegrationPoint(n *Node) bool {
+ if n.Namespace != "math" {
+ return false
+ }
+ switch n.Data {
+ case "mi", "mo", "mn", "ms", "mtext":
+ return true
+ }
+ return false
+}
+
+// Section 12.2.5.5.
+var breakout = map[string]bool{
+ "b": true,
+ "big": true,
+ "blockquote": true,
+ "body": true,
+ "br": true,
+ "center": true,
+ "code": true,
+ "dd": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "em": true,
+ "embed": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "hr": true,
+ "i": true,
+ "img": true,
+ "li": true,
+ "listing": true,
+ "menu": true,
+ "meta": true,
+ "nobr": true,
+ "ol": true,
+ "p": true,
+ "pre": true,
+ "ruby": true,
+ "s": true,
+ "small": true,
+ "span": true,
+ "strong": true,
+ "strike": true,
+ "sub": true,
+ "sup": true,
+ "table": true,
+ "tt": true,
+ "u": true,
+ "ul": true,
+ "var": true,
+}
+
+// Section 12.2.5.5.
+var svgTagNameAdjustments = map[string]string{
+ "altglyph": "altGlyph",
+ "altglyphdef": "altGlyphDef",
+ "altglyphitem": "altGlyphItem",
+ "animatecolor": "animateColor",
+ "animatemotion": "animateMotion",
+ "animatetransform": "animateTransform",
+ "clippath": "clipPath",
+ "feblend": "feBlend",
+ "fecolormatrix": "feColorMatrix",
+ "fecomponenttransfer": "feComponentTransfer",
+ "fecomposite": "feComposite",
+ "feconvolvematrix": "feConvolveMatrix",
+ "fediffuselighting": "feDiffuseLighting",
+ "fedisplacementmap": "feDisplacementMap",
+ "fedistantlight": "feDistantLight",
+ "feflood": "feFlood",
+ "fefunca": "feFuncA",
+ "fefuncb": "feFuncB",
+ "fefuncg": "feFuncG",
+ "fefuncr": "feFuncR",
+ "fegaussianblur": "feGaussianBlur",
+ "feimage": "feImage",
+ "femerge": "feMerge",
+ "femergenode": "feMergeNode",
+ "femorphology": "feMorphology",
+ "feoffset": "feOffset",
+ "fepointlight": "fePointLight",
+ "fespecularlighting": "feSpecularLighting",
+ "fespotlight": "feSpotLight",
+ "fetile": "feTile",
+ "feturbulence": "feTurbulence",
+ "foreignobject": "foreignObject",
+ "glyphref": "glyphRef",
+ "lineargradient": "linearGradient",
+ "radialgradient": "radialGradient",
+ "textpath": "textPath",
+}
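+
+// The table above exists because the HTML tokenizer lower-cases all tag
+// names: <clipPath> arrives as "clippath" and must be mapped back to its
+// mixed-case SVG form when it appears in foreign (SVG) content.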
+
+// Section 12.2.5.1
+var mathMLAttributeAdjustments = map[string]string{
+ "definitionurl": "definitionURL",
+}
+
+var svgAttributeAdjustments = map[string]string{
+ "attributename": "attributeName",
+ "attributetype": "attributeType",
+ "basefrequency": "baseFrequency",
+ "baseprofile": "baseProfile",
+ "calcmode": "calcMode",
+ "clippathunits": "clipPathUnits",
+ "contentscripttype": "contentScriptType",
+ "contentstyletype": "contentStyleType",
+ "diffuseconstant": "diffuseConstant",
+ "edgemode": "edgeMode",
+ "externalresourcesrequired": "externalResourcesRequired",
+ "filterres": "filterRes",
+ "filterunits": "filterUnits",
+ "glyphref": "glyphRef",
+ "gradienttransform": "gradientTransform",
+ "gradientunits": "gradientUnits",
+ "kernelmatrix": "kernelMatrix",
+ "kernelunitlength": "kernelUnitLength",
+ "keypoints": "keyPoints",
+ "keysplines": "keySplines",
+ "keytimes": "keyTimes",
+ "lengthadjust": "lengthAdjust",
+ "limitingconeangle": "limitingConeAngle",
+ "markerheight": "markerHeight",
+ "markerunits": "markerUnits",
+ "markerwidth": "markerWidth",
+ "maskcontentunits": "maskContentUnits",
+ "maskunits": "maskUnits",
+ "numoctaves": "numOctaves",
+ "pathlength": "pathLength",
+ "patterncontentunits": "patternContentUnits",
+ "patterntransform": "patternTransform",
+ "patternunits": "patternUnits",
+ "pointsatx": "pointsAtX",
+ "pointsaty": "pointsAtY",
+ "pointsatz": "pointsAtZ",
+ "preservealpha": "preserveAlpha",
+ "preserveaspectratio": "preserveAspectRatio",
+ "primitiveunits": "primitiveUnits",
+ "refx": "refX",
+ "refy": "refY",
+ "repeatcount": "repeatCount",
+ "repeatdur": "repeatDur",
+ "requiredextensions": "requiredExtensions",
+ "requiredfeatures": "requiredFeatures",
+ "specularconstant": "specularConstant",
+ "specularexponent": "specularExponent",
+ "spreadmethod": "spreadMethod",
+ "startoffset": "startOffset",
+ "stddeviation": "stdDeviation",
+ "stitchtiles": "stitchTiles",
+ "surfacescale": "surfaceScale",
+ "systemlanguage": "systemLanguage",
+ "tablevalues": "tableValues",
+ "targetx": "targetX",
+ "targety": "targetY",
+ "textlength": "textLength",
+ "viewbox": "viewBox",
+ "viewtarget": "viewTarget",
+ "xchannelselector": "xChannelSelector",
+ "ychannelselector": "yChannelSelector",
+ "zoomandpan": "zoomAndPan",
+}
diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
new file mode 100644
index 000000000..26b657aec
--- /dev/null
+++ b/vendor/golang.org/x/net/html/node.go
@@ -0,0 +1,193 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "golang.org/x/net/html/atom"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint32
+
+const (
+ ErrorNode NodeType = iota
+ TextNode
+ DocumentNode
+ ElementNode
+ CommentNode
+ DoctypeNode
+ scopeMarkerNode
+)
+
+// Section 12.2.3.3 says "scope markers are inserted when entering applet
+// elements, buttons, object elements, marquees, table cells, and table
+// captions, and are used to prevent formatting from 'leaking'".
+var scopeMarker = Node{Type: scopeMarkerNode}
+
+// A Node consists of a NodeType and some Data (tag name for element nodes,
+// content for text) and is part of a tree of Nodes. Element nodes may also
+// have a Namespace and contain a slice of Attributes. Data is unescaped, so
+// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
+// is the atom for Data, or zero if Data is not a known tag name.
+//
+// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
+// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
+// "svg" is short for "http://www.w3.org/2000/svg".
+type Node struct {
+ Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+ Type NodeType
+ DataAtom atom.Atom
+ Data string
+ Namespace string
+ Attr []Attribute
+}
+
+// InsertBefore inserts newChild as a child of n, immediately before oldChild
+// in the sequence of n's children. oldChild may be nil, in which case newChild
+// is appended to the end of n's children.
+//
+// It will panic if newChild already has a parent or siblings.
+func (n *Node) InsertBefore(newChild, oldChild *Node) {
+ if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
+ panic("html: InsertBefore called for an attached child Node")
+ }
+ var prev, next *Node
+ if oldChild != nil {
+ prev, next = oldChild.PrevSibling, oldChild
+ } else {
+ prev = n.LastChild
+ }
+ if prev != nil {
+ prev.NextSibling = newChild
+ } else {
+ n.FirstChild = newChild
+ }
+ if next != nil {
+ next.PrevSibling = newChild
+ } else {
+ n.LastChild = newChild
+ }
+ newChild.Parent = n
+ newChild.PrevSibling = prev
+ newChild.NextSibling = next
+}
+
+// AppendChild adds a node c as a child of n.
+//
+// It will panic if c already has a parent or siblings.
+func (n *Node) AppendChild(c *Node) {
+ if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
+ panic("html: AppendChild called for an attached child Node")
+ }
+ last := n.LastChild
+ if last != nil {
+ last.NextSibling = c
+ } else {
+ n.FirstChild = c
+ }
+ n.LastChild = c
+ c.Parent = n
+ c.PrevSibling = last
+}
+
+// RemoveChild removes a node c that is a child of n. Afterwards, c will have
+// no parent and no siblings.
+//
+// It will panic if c's parent is not n.
+func (n *Node) RemoveChild(c *Node) {
+ if c.Parent != n {
+ panic("html: RemoveChild called for a non-child Node")
+ }
+ if n.FirstChild == c {
+ n.FirstChild = c.NextSibling
+ }
+ if c.NextSibling != nil {
+ c.NextSibling.PrevSibling = c.PrevSibling
+ }
+ if n.LastChild == c {
+ n.LastChild = c.PrevSibling
+ }
+ if c.PrevSibling != nil {
+ c.PrevSibling.NextSibling = c.NextSibling
+ }
+ c.Parent = nil
+ c.PrevSibling = nil
+ c.NextSibling = nil
+}
+
+// reparentChildren reparents all of src's child nodes to dst.
+func reparentChildren(dst, src *Node) {
+ for {
+ child := src.FirstChild
+ if child == nil {
+ break
+ }
+ src.RemoveChild(child)
+ dst.AppendChild(child)
+ }
+}
+
+// clone returns a new node with the same type, data and attributes.
+// The clone has no parent, no siblings and no children.
+func (n *Node) clone() *Node {
+ m := &Node{
+ Type: n.Type,
+ DataAtom: n.DataAtom,
+ Data: n.Data,
+ Attr: make([]Attribute, len(n.Attr)),
+ }
+ copy(m.Attr, n.Attr)
+ return m
+}
+
+// nodeStack is a stack of nodes.
+type nodeStack []*Node
+
+// pop pops the stack. It will panic if s is empty.
+func (s *nodeStack) pop() *Node {
+ i := len(*s)
+ n := (*s)[i-1]
+ *s = (*s)[:i-1]
+ return n
+}
+
+// top returns the most recently pushed node, or nil if s is empty.
+func (s *nodeStack) top() *Node {
+ if i := len(*s); i > 0 {
+ return (*s)[i-1]
+ }
+ return nil
+}
+
+// index returns the index of the top-most occurrence of n in the stack, or -1
+// if n is not present.
+func (s *nodeStack) index(n *Node) int {
+ for i := len(*s) - 1; i >= 0; i-- {
+ if (*s)[i] == n {
+ return i
+ }
+ }
+ return -1
+}
+
+// insert inserts a node at the given index.
+func (s *nodeStack) insert(i int, n *Node) {
+ (*s) = append(*s, nil)
+ copy((*s)[i+1:], (*s)[i:])
+ (*s)[i] = n
+}
+
+// remove removes a node from the stack. It is a no-op if n is not present.
+func (s *nodeStack) remove(n *Node) {
+ i := s.index(n)
+ if i == -1 {
+ return
+ }
+ copy((*s)[i:], (*s)[i+1:])
+ j := len(*s) - 1
+ (*s)[j] = nil
+ *s = (*s)[:j]
+}
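+
+// A minimal usage sketch of the tree API above: build <p>hi</p> under a
+// document node and then detach it again.
+//
+//	doc := &Node{Type: DocumentNode}
+//	p := &Node{Type: ElementNode, DataAtom: atom.P, Data: "p"}
+//	doc.AppendChild(p)
+//	p.AppendChild(&Node{Type: TextNode, Data: "hi"})
+//	doc.RemoveChild(p) // p now has no parent and no siblings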
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
new file mode 100644
index 000000000..be4b2bf5a
--- /dev/null
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -0,0 +1,2094 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ a "golang.org/x/net/html/atom"
+)
+
+// A parser implements the HTML5 parsing algorithm:
+// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
+type parser struct {
+ // tokenizer provides the tokens for the parser.
+ tokenizer *Tokenizer
+ // tok is the most recently read token.
+ tok Token
+ // Self-closing tags like <hr/> are treated as start tags, except that
+ // hasSelfClosingToken is set while they are being processed.
+ hasSelfClosingToken bool
+ // doc is the document root element.
+ doc *Node
+ // The stack of open elements (section 12.2.3.2) and active formatting
+ // elements (section 12.2.3.3).
+ oe, afe nodeStack
+ // Element pointers (section 12.2.3.4).
+ head, form *Node
+ // Other parsing state flags (section 12.2.3.5).
+ scripting, framesetOK bool
+ // im is the current insertion mode.
+ im insertionMode
+ // originalIM is the insertion mode to go back to after completing a text
+ // or inTableText insertion mode.
+ originalIM insertionMode
+ // fosterParenting is whether new elements should be inserted according to
+ // the foster parenting rules (section 12.2.5.3).
+ fosterParenting bool
+ // quirks is whether the parser is operating in "quirks mode."
+ quirks bool
+ // fragment is whether the parser is parsing an HTML fragment.
+ fragment bool
+ // context is the context element when parsing an HTML fragment
+ // (section 12.4).
+ context *Node
+}
+
+func (p *parser) top() *Node {
+ if n := p.oe.top(); n != nil {
+ return n
+ }
+ return p.doc
+}
+
+// Stop tags for use in popUntil. These come from section 12.2.3.2.
+var (
+ defaultScopeStopTags = map[string][]a.Atom{
+ "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
+ "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
+ "svg": {a.Desc, a.ForeignObject, a.Title},
+ }
+)
+
+type scope int
+
+const (
+ defaultScope scope = iota
+ listItemScope
+ buttonScope
+ tableScope
+ tableRowScope
+ tableBodyScope
+ selectScope
+)
+
+// popUntil pops the stack of open elements at the highest element whose tag
+// is in matchTags, provided there is no higher element in the scope's stop
+// tags (as defined in section 12.2.3.2). It returns whether or not there was
+// such an element. If there was not, popUntil leaves the stack unchanged.
+//
+// For example, the set of stop tags for table scope is: "html", "table". If
+// the stack was:
+// ["html", "body", "font", "table", "b", "i", "u"]
+// then popUntil(tableScope, "font") would return false, but
+// popUntil(tableScope, "i") would return true and the stack would become:
+// ["html", "body", "font", "table", "b"]
+//
+// If an element's tag is in both the stop tags and matchTags, then the stack
+// will be popped and the function returns true (provided, of course, there was
+// no higher element in the stack that was also in the stop tags). For example,
+// popUntil(tableScope, "table") returns true and leaves:
+// ["html", "body", "font"]
+func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
+ if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
+ p.oe = p.oe[:i]
+ return true
+ }
+ return false
+}
+
+// indexOfElementInScope returns the index in p.oe of the highest element whose
+// tag is in matchTags that is in scope. If no matching element is in scope, it
+// returns -1.
+func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ tagAtom := p.oe[i].DataAtom
+ if p.oe[i].Namespace == "" {
+ for _, t := range matchTags {
+ if t == tagAtom {
+ return i
+ }
+ }
+ switch s {
+ case defaultScope:
+ // No-op.
+ case listItemScope:
+ if tagAtom == a.Ol || tagAtom == a.Ul {
+ return -1
+ }
+ case buttonScope:
+ if tagAtom == a.Button {
+ return -1
+ }
+ case tableScope:
+ if tagAtom == a.Html || tagAtom == a.Table {
+ return -1
+ }
+ case selectScope:
+ if tagAtom != a.Optgroup && tagAtom != a.Option {
+ return -1
+ }
+ default:
+ panic("unreachable")
+ }
+ }
+ switch s {
+ case defaultScope, listItemScope, buttonScope:
+ for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
+ if t == tagAtom {
+ return -1
+ }
+ }
+ }
+ }
+ return -1
+}
+
+// elementInScope is like popUntil, except that it doesn't modify the stack of
+// open elements.
+func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
+ return p.indexOfElementInScope(s, matchTags...) != -1
+}
+
+// clearStackToContext pops elements off the stack of open elements until a
+// scope-defined element is found.
+func (p *parser) clearStackToContext(s scope) {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ tagAtom := p.oe[i].DataAtom
+ switch s {
+ case tableScope:
+ if tagAtom == a.Html || tagAtom == a.Table {
+ p.oe = p.oe[:i+1]
+ return
+ }
+ case tableRowScope:
+ if tagAtom == a.Html || tagAtom == a.Tr {
+ p.oe = p.oe[:i+1]
+ return
+ }
+ case tableBodyScope:
+ if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead {
+ p.oe = p.oe[:i+1]
+ return
+ }
+ default:
+ panic("unreachable")
+ }
+ }
+}
+
+// generateImpliedEndTags pops nodes off the stack of open elements as long as
+// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt.
+// If exceptions are specified, nodes with that name will not be popped off.
+func (p *parser) generateImpliedEndTags(exceptions ...string) {
+ var i int
+loop:
+ for i = len(p.oe) - 1; i >= 0; i-- {
+ n := p.oe[i]
+ if n.Type == ElementNode {
+ switch n.DataAtom {
+ case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt:
+ for _, except := range exceptions {
+ if n.Data == except {
+ break loop
+ }
+ }
+ continue
+ }
+ }
+ break
+ }
+
+ p.oe = p.oe[:i+1]
+}
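+
+// For example, with the stack of open elements ending [..., <p>, <li>],
+// generateImpliedEndTags() pops both <li> and <p>, while
+// generateImpliedEndTags("li") stops at, and keeps, the <li> element.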
+
+// addChild adds a child node n to the top element, and pushes n onto the stack
+// of open elements if it is an element node.
+func (p *parser) addChild(n *Node) {
+ if p.shouldFosterParent() {
+ p.fosterParent(n)
+ } else {
+ p.top().AppendChild(n)
+ }
+
+ if n.Type == ElementNode {
+ p.oe = append(p.oe, n)
+ }
+}
+
+// shouldFosterParent returns whether the next node to be added should be
+// foster parented.
+func (p *parser) shouldFosterParent() bool {
+ if p.fosterParenting {
+ switch p.top().DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ return true
+ }
+ }
+ return false
+}
+
+// fosterParent adds a child node according to the foster parenting rules.
+// Section 12.2.5.3, "foster parenting".
+func (p *parser) fosterParent(n *Node) {
+ var table, parent, prev *Node
+ var i int
+ for i = len(p.oe) - 1; i >= 0; i-- {
+ if p.oe[i].DataAtom == a.Table {
+ table = p.oe[i]
+ break
+ }
+ }
+
+ if table == nil {
+ // The foster parent is the html element.
+ parent = p.oe[0]
+ } else {
+ parent = table.Parent
+ }
+ if parent == nil {
+ parent = p.oe[i-1]
+ }
+
+ if table != nil {
+ prev = table.PrevSibling
+ } else {
+ prev = parent.LastChild
+ }
+ if prev != nil && prev.Type == TextNode && n.Type == TextNode {
+ prev.Data += n.Data
+ return
+ }
+
+ parent.InsertBefore(n, table)
+}
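+
+// For example, for mis-nested markup such as "<table>x<tr>", the text node
+// "x" is foster parented: it is inserted before the <table> element instead
+// of inside it, matching the spec's error-recovery behavior.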
+
+// addText adds text to the preceding node if it is a text node, or else it
+// calls addChild with a new text node.
+func (p *parser) addText(text string) {
+ if text == "" {
+ return
+ }
+
+ if p.shouldFosterParent() {
+ p.fosterParent(&Node{
+ Type: TextNode,
+ Data: text,
+ })
+ return
+ }
+
+ t := p.top()
+ if n := t.LastChild; n != nil && n.Type == TextNode {
+ n.Data += text
+ return
+ }
+ p.addChild(&Node{
+ Type: TextNode,
+ Data: text,
+ })
+}
+
+// addElement adds a child element based on the current token.
+func (p *parser) addElement() {
+ p.addChild(&Node{
+ Type: ElementNode,
+ DataAtom: p.tok.DataAtom,
+ Data: p.tok.Data,
+ Attr: p.tok.Attr,
+ })
+}
+
+// Section 12.2.3.3.
+func (p *parser) addFormattingElement() {
+ tagAtom, attr := p.tok.DataAtom, p.tok.Attr
+ p.addElement()
+
+ // Implement the Noah's Ark clause, but with three per family instead of two.
+ identicalElements := 0
+findIdenticalElements:
+ for i := len(p.afe) - 1; i >= 0; i-- {
+ n := p.afe[i]
+ if n.Type == scopeMarkerNode {
+ break
+ }
+ if n.Type != ElementNode {
+ continue
+ }
+ if n.Namespace != "" {
+ continue
+ }
+ if n.DataAtom != tagAtom {
+ continue
+ }
+ if len(n.Attr) != len(attr) {
+ continue
+ }
+ compareAttributes:
+ for _, t0 := range n.Attr {
+ for _, t1 := range attr {
+ if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
+ // Found a match for this attribute, continue with the next attribute.
+ continue compareAttributes
+ }
+ }
+ // If we get here, there is no attribute in attr that matches t0.
+ // Therefore the element is not identical to the new one.
+ continue findIdenticalElements
+ }
+
+ identicalElements++
+ if identicalElements >= 3 {
+ p.afe.remove(n)
+ }
+ }
+
+ p.afe = append(p.afe, p.top())
+}
+
+// Section 12.2.3.3.
+func (p *parser) clearActiveFormattingElements() {
+ for {
+ n := p.afe.pop()
+ if len(p.afe) == 0 || n.Type == scopeMarkerNode {
+ return
+ }
+ }
+}
+
+// Section 12.2.3.3.
+func (p *parser) reconstructActiveFormattingElements() {
+ n := p.afe.top()
+ if n == nil {
+ return
+ }
+ if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
+ return
+ }
+ i := len(p.afe) - 1
+ for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
+ if i == 0 {
+ i = -1
+ break
+ }
+ i--
+ n = p.afe[i]
+ }
+ for {
+ i++
+ clone := p.afe[i].clone()
+ p.addChild(clone)
+ p.afe[i] = clone
+ if i == len(p.afe)-1 {
+ break
+ }
+ }
+}
+
+// Section 12.2.4.
+func (p *parser) acknowledgeSelfClosingTag() {
+ p.hasSelfClosingToken = false
+}
+
+// An insertion mode (section 12.2.3.1) is the state transition function from
+// a particular state in the HTML5 parser's state machine. It updates the
+// parser's fields depending on parser.tok (where ErrorToken means EOF).
+// It returns whether the token was consumed.
+type insertionMode func(*parser) bool
+
+// setOriginalIM sets the insertion mode to return to after completing a text or
+// inTableText insertion mode.
+// Section 12.2.3.1, "using the rules for".
+func (p *parser) setOriginalIM() {
+ if p.originalIM != nil {
+ panic("html: bad parser state: originalIM was set twice")
+ }
+ p.originalIM = p.im
+}
+
+// Section 12.2.3.1, "reset the insertion mode".
+func (p *parser) resetInsertionMode() {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ n := p.oe[i]
+ if i == 0 && p.context != nil {
+ n = p.context
+ }
+
+ switch n.DataAtom {
+ case a.Select:
+ p.im = inSelectIM
+ case a.Td, a.Th:
+ p.im = inCellIM
+ case a.Tr:
+ p.im = inRowIM
+ case a.Tbody, a.Thead, a.Tfoot:
+ p.im = inTableBodyIM
+ case a.Caption:
+ p.im = inCaptionIM
+ case a.Colgroup:
+ p.im = inColumnGroupIM
+ case a.Table:
+ p.im = inTableIM
+ case a.Head:
+ p.im = inBodyIM
+ case a.Body:
+ p.im = inBodyIM
+ case a.Frameset:
+ p.im = inFramesetIM
+ case a.Html:
+ p.im = beforeHeadIM
+ default:
+ continue
+ }
+ return
+ }
+ p.im = inBodyIM
+}
+
+const whitespace = " \t\r\n\f"
+
+// Section 12.2.5.4.1.
+func initialIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+ if len(p.tok.Data) == 0 {
+ // It was all whitespace, so ignore it.
+ return true
+ }
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ n, quirks := parseDoctype(p.tok.Data)
+ p.doc.AppendChild(n)
+ p.quirks = quirks
+ p.im = beforeHTMLIM
+ return true
+ }
+ p.quirks = true
+ p.im = beforeHTMLIM
+ return false
+}
+
+// Section 12.2.5.4.2.
+func beforeHTMLIM(p *parser) bool {
+ switch p.tok.Type {
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ case TextToken:
+ p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+ if len(p.tok.Data) == 0 {
+ // It was all whitespace, so ignore it.
+ return true
+ }
+ case StartTagToken:
+ if p.tok.DataAtom == a.Html {
+ p.addElement()
+ p.im = beforeHeadIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Head, a.Body, a.Html, a.Br:
+ p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+ return false
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ }
+ p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+ return false
+}
+
+// Section 12.2.5.4.3.
+func beforeHeadIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
+ if len(p.tok.Data) == 0 {
+ // It was all whitespace, so ignore it.
+ return true
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Head:
+ p.addElement()
+ p.head = p.top()
+ p.im = inHeadIM
+ return true
+ case a.Html:
+ return inBodyIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Head, a.Body, a.Html, a.Br:
+ p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
+ return false
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
+ return false
+}
+
+// Section 12.2.5.4.4.
+func inHeadIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) < len(p.tok.Data) {
+ // Add the initial whitespace to the current node.
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+ if s == "" {
+ return true
+ }
+ p.tok.Data = s
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ return true
+ case a.Script, a.Title, a.Noscript, a.Noframes, a.Style:
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ return true
+ case a.Head:
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Head:
+ n := p.oe.pop()
+ if n.DataAtom != a.Head {
+ panic("html: bad parser state: <head> element not found, in the in-head insertion mode")
+ }
+ p.im = afterHeadIM
+ return true
+ case a.Body, a.Html, a.Br:
+ p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
+ return false
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
+ return false
+}
+
+// Section 12.2.5.4.6.
+func afterHeadIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) < len(p.tok.Data) {
+ // Add the initial whitespace to the current node.
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+ if s == "" {
+ return true
+ }
+ p.tok.Data = s
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Body:
+ p.addElement()
+ p.framesetOK = false
+ p.im = inBodyIM
+ return true
+ case a.Frameset:
+ p.addElement()
+ p.im = inFramesetIM
+ return true
+ case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+ p.oe = append(p.oe, p.head)
+ defer p.oe.remove(p.head)
+ return inHeadIM(p)
+ case a.Head:
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Body, a.Html, a.Br:
+ // Drop down to creating an implied <body> tag.
+ default:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
+ p.framesetOK = true
+ return false
+}
+
+// copyAttributes copies attributes of src not found on dst to dst.
+func copyAttributes(dst *Node, src Token) {
+ if len(src.Attr) == 0 {
+ return
+ }
+ attr := map[string]string{}
+ for _, t := range dst.Attr {
+ attr[t.Key] = t.Val
+ }
+ for _, t := range src.Attr {
+ if _, ok := attr[t.Key]; !ok {
+ dst.Attr = append(dst.Attr, t)
+ attr[t.Key] = t.Val
+ }
+ }
+}
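+
+// Illustrative sketch (not part of the upstream file) of copyAttributes'
+// merge semantics: keys already present on dst win, and only keys missing
+// from dst are appended from src. Given an existing *Node dst,
+//
+//    dst.Attr = []Attribute{{Key: "href", Val: "x"}}
+//    src := Token{Attr: []Attribute{
+//        {Key: "href", Val: "y"},
+//        {Key: "id", Val: "z"},
+//    }}
+//    copyAttributes(dst, src)
+//
+// dst.Attr becomes [{href x} {id z}]: the duplicate "href" from src is
+// dropped and "id" is appended. This is how a repeated <html> or <body>
+// start tag contributes attributes without overriding earlier ones.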
+
+// Section 12.2.5.4.7.
+func inBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ d := p.tok.Data
+ switch n := p.oe.top(); n.DataAtom {
+ case a.Pre, a.Listing:
+ if n.FirstChild == nil {
+ // Ignore a newline at the start of a <pre> block.
+ if d != "" && d[0] == '\r' {
+ d = d[1:]
+ }
+ if d != "" && d[0] == '\n' {
+ d = d[1:]
+ }
+ }
+ }
+ d = strings.Replace(d, "\x00", "", -1)
+ if d == "" {
+ return true
+ }
+ p.reconstructActiveFormattingElements()
+ p.addText(d)
+ if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+ // There were non-whitespace characters inserted.
+ p.framesetOK = false
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ copyAttributes(p.oe[0], p.tok)
+ case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+ return inHeadIM(p)
+ case a.Body:
+ if len(p.oe) >= 2 {
+ body := p.oe[1]
+ if body.Type == ElementNode && body.DataAtom == a.Body {
+ p.framesetOK = false
+ copyAttributes(body, p.tok)
+ }
+ }
+ case a.Frameset:
+ if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+ // Ignore the token.
+ return true
+ }
+ body := p.oe[1]
+ if body.Parent != nil {
+ body.Parent.RemoveChild(body)
+ }
+ p.oe = p.oe[:1]
+ p.addElement()
+ p.im = inFramesetIM
+ return true
+ case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+ p.popUntil(buttonScope, a.P)
+ switch n := p.top(); n.DataAtom {
+ case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+ p.oe.pop()
+ }
+ p.addElement()
+ case a.Pre, a.Listing:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ // The newline, if any, will be dealt with by the TextToken case.
+ p.framesetOK = false
+ case a.Form:
+ if p.form == nil {
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ p.form = p.top()
+ }
+ case a.Li:
+ p.framesetOK = false
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ node := p.oe[i]
+ switch node.DataAtom {
+ case a.Li:
+ p.oe = p.oe[:i]
+ case a.Address, a.Div, a.P:
+ continue
+ default:
+ if !isSpecialElement(node) {
+ continue
+ }
+ }
+ break
+ }
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.Dd, a.Dt:
+ p.framesetOK = false
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ node := p.oe[i]
+ switch node.DataAtom {
+ case a.Dd, a.Dt:
+ p.oe = p.oe[:i]
+ case a.Address, a.Div, a.P:
+ continue
+ default:
+ if !isSpecialElement(node) {
+ continue
+ }
+ }
+ break
+ }
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.Plaintext:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ case a.Button:
+ p.popUntil(defaultScope, a.Button)
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ case a.A:
+ for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+ if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+ p.inBodyEndTagFormatting(a.A)
+ p.oe.remove(n)
+ p.afe.remove(n)
+ break
+ }
+ }
+ p.reconstructActiveFormattingElements()
+ p.addFormattingElement()
+ case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+ p.reconstructActiveFormattingElements()
+ p.addFormattingElement()
+ case a.Nobr:
+ p.reconstructActiveFormattingElements()
+ if p.elementInScope(defaultScope, a.Nobr) {
+ p.inBodyEndTagFormatting(a.Nobr)
+ p.reconstructActiveFormattingElements()
+ }
+ p.addFormattingElement()
+ case a.Applet, a.Marquee, a.Object:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.afe = append(p.afe, &scopeMarker)
+ p.framesetOK = false
+ case a.Table:
+ if !p.quirks {
+ p.popUntil(buttonScope, a.P)
+ }
+ p.addElement()
+ p.framesetOK = false
+ p.im = inTableIM
+ return true
+ case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ if p.tok.DataAtom == a.Input {
+ for _, t := range p.tok.Attr {
+ if t.Key == "type" {
+ if strings.ToLower(t.Val) == "hidden" {
+ // Skip setting framesetOK = false
+ return true
+ }
+ }
+ }
+ }
+ p.framesetOK = false
+ case a.Param, a.Source, a.Track:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ case a.Hr:
+ p.popUntil(buttonScope, a.P)
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ p.framesetOK = false
+ case a.Image:
+ p.tok.DataAtom = a.Img
+ p.tok.Data = a.Img.String()
+ return false
+ case a.Isindex:
+ if p.form != nil {
+ // Ignore the token.
+ return true
+ }
+ action := ""
+ prompt := "This is a searchable index. Enter search keywords: "
+ attr := []Attribute{{Key: "name", Val: "isindex"}}
+ for _, t := range p.tok.Attr {
+ switch t.Key {
+ case "action":
+ action = t.Val
+ case "name":
+ // Ignore the attribute.
+ case "prompt":
+ prompt = t.Val
+ default:
+ attr = append(attr, t)
+ }
+ }
+ p.acknowledgeSelfClosingTag()
+ p.popUntil(buttonScope, a.P)
+ p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+ if action != "" {
+ p.form.Attr = []Attribute{{Key: "action", Val: action}}
+ }
+ p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+ p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+ p.addText(prompt)
+ p.addChild(&Node{
+ Type: ElementNode,
+ DataAtom: a.Input,
+ Data: a.Input.String(),
+ Attr: attr,
+ })
+ p.oe.pop()
+ p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+ p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+ p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+ case a.Textarea:
+ p.addElement()
+ p.setOriginalIM()
+ p.framesetOK = false
+ p.im = textIM
+ case a.Xmp:
+ p.popUntil(buttonScope, a.P)
+ p.reconstructActiveFormattingElements()
+ p.framesetOK = false
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ case a.Iframe:
+ p.framesetOK = false
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ case a.Noembed, a.Noscript:
+ p.addElement()
+ p.setOriginalIM()
+ p.im = textIM
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ p.im = inSelectIM
+ return true
+ case a.Optgroup, a.Option:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ case a.Rp, a.Rt:
+ if p.elementInScope(defaultScope, a.Ruby) {
+ p.generateImpliedEndTags()
+ }
+ p.addElement()
+ case a.Math, a.Svg:
+ p.reconstructActiveFormattingElements()
+ if p.tok.DataAtom == a.Math {
+ adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+ } else {
+ adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+ }
+ adjustForeignAttributes(p.tok.Attr)
+ p.addElement()
+ p.top().Namespace = p.tok.Data
+ if p.hasSelfClosingToken {
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ }
+ return true
+ case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ // Ignore the token.
+ default:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Body:
+ if p.elementInScope(defaultScope, a.Body) {
+ p.im = afterBodyIM
+ }
+ case a.Html:
+ if p.elementInScope(defaultScope, a.Body) {
+ p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+ return false
+ }
+ return true
+ case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+ p.popUntil(defaultScope, p.tok.DataAtom)
+ case a.Form:
+ node := p.form
+ p.form = nil
+ i := p.indexOfElementInScope(defaultScope, a.Form)
+ if node == nil || i == -1 || p.oe[i] != node {
+ // Ignore the token.
+ return true
+ }
+ p.generateImpliedEndTags()
+ p.oe.remove(node)
+ case a.P:
+ if !p.elementInScope(buttonScope, a.P) {
+ p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+ }
+ p.popUntil(buttonScope, a.P)
+ case a.Li:
+ p.popUntil(listItemScope, a.Li)
+ case a.Dd, a.Dt:
+ p.popUntil(defaultScope, p.tok.DataAtom)
+ case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+ p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+ case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+ p.inBodyEndTagFormatting(p.tok.DataAtom)
+ case a.Applet, a.Marquee, a.Object:
+ if p.popUntil(defaultScope, p.tok.DataAtom) {
+ p.clearActiveFormattingElements()
+ }
+ case a.Br:
+ p.tok.Type = StartTagToken
+ return false
+ default:
+ p.inBodyEndTagOther(p.tok.DataAtom)
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ }
+
+ return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+ // This is the "adoption agency" algorithm, described at
+ // https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+ // TODO: this is a fairly literal line-by-line translation of that algorithm.
+ // Once the code successfully parses the comprehensive test suite, we should
+ // refactor this code to be more idiomatic.
+
+ // Steps 1-4. The outer loop.
+ for i := 0; i < 8; i++ {
+ // Step 5. Find the formatting element.
+ var formattingElement *Node
+ for j := len(p.afe) - 1; j >= 0; j-- {
+ if p.afe[j].Type == scopeMarkerNode {
+ break
+ }
+ if p.afe[j].DataAtom == tagAtom {
+ formattingElement = p.afe[j]
+ break
+ }
+ }
+ if formattingElement == nil {
+ p.inBodyEndTagOther(tagAtom)
+ return
+ }
+ feIndex := p.oe.index(formattingElement)
+ if feIndex == -1 {
+ p.afe.remove(formattingElement)
+ return
+ }
+ if !p.elementInScope(defaultScope, tagAtom) {
+ // Ignore the tag.
+ return
+ }
+
+ // Steps 9-10. Find the furthest block.
+ var furthestBlock *Node
+ for _, e := range p.oe[feIndex:] {
+ if isSpecialElement(e) {
+ furthestBlock = e
+ break
+ }
+ }
+ if furthestBlock == nil {
+ e := p.oe.pop()
+ for e != formattingElement {
+ e = p.oe.pop()
+ }
+ p.afe.remove(e)
+ return
+ }
+
+ // Steps 11-12. Find the common ancestor and bookmark node.
+ commonAncestor := p.oe[feIndex-1]
+ bookmark := p.afe.index(formattingElement)
+
+ // Step 13. The inner loop. Find the lastNode to reparent.
+ lastNode := furthestBlock
+ node := furthestBlock
+ x := p.oe.index(node)
+ // Steps 13.1-13.2
+ for j := 0; j < 3; j++ {
+ // Step 13.3.
+ x--
+ node = p.oe[x]
+ // Step 13.4 - 13.5.
+ if p.afe.index(node) == -1 {
+ p.oe.remove(node)
+ continue
+ }
+ // Step 13.6.
+ if node == formattingElement {
+ break
+ }
+ // Step 13.7.
+ clone := node.clone()
+ p.afe[p.afe.index(node)] = clone
+ p.oe[p.oe.index(node)] = clone
+ node = clone
+ // Step 13.8.
+ if lastNode == furthestBlock {
+ bookmark = p.afe.index(node) + 1
+ }
+ // Step 13.9.
+ if lastNode.Parent != nil {
+ lastNode.Parent.RemoveChild(lastNode)
+ }
+ node.AppendChild(lastNode)
+ // Step 13.10.
+ lastNode = node
+ }
+
+ // Step 14. Reparent lastNode to the common ancestor,
+ // or for misnested table nodes, to the foster parent.
+ if lastNode.Parent != nil {
+ lastNode.Parent.RemoveChild(lastNode)
+ }
+ switch commonAncestor.DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ p.fosterParent(lastNode)
+ default:
+ commonAncestor.AppendChild(lastNode)
+ }
+
+ // Steps 15-17. Reparent nodes from the furthest block's children
+ // to a clone of the formatting element.
+ clone := formattingElement.clone()
+ reparentChildren(clone, furthestBlock)
+ furthestBlock.AppendChild(clone)
+
+ // Step 18. Fix up the list of active formatting elements.
+ if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+ // Move the bookmark with the rest of the list.
+ bookmark--
+ }
+ p.afe.remove(formattingElement)
+ p.afe.insert(bookmark, clone)
+
+ // Step 19. Fix up the stack of open elements.
+ p.oe.remove(formattingElement)
+ p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+ }
+}
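+
+// Worked example (illustrative, not part of the upstream file): for the
+// misnested input "<p>1<b>2<i>3</b>4</i>5</p>", the </b> end tag runs this
+// algorithm. The <b> is closed where it must be, and the still-open <i> is
+// later recreated (as a clone) around the content that followed, so the
+// tree serializes as the properly nested
+//
+//    <p>1<b>2<i>3</i></b><i>4</i>5</p>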
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ if p.oe[i].DataAtom == tagAtom {
+ p.oe = p.oe[:i]
+ break
+ }
+ if isSpecialElement(p.oe[i]) {
+ break
+ }
+ }
+}
+
+// Section 12.2.5.4.8.
+func textIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ p.oe.pop()
+ case TextToken:
+ d := p.tok.Data
+ if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+ // Ignore a newline at the start of a <textarea> block.
+ if d != "" && d[0] == '\r' {
+ d = d[1:]
+ }
+ if d != "" && d[0] == '\n' {
+ d = d[1:]
+ }
+ }
+ if d == "" {
+ return true
+ }
+ p.addText(d)
+ return true
+ case EndTagToken:
+ p.oe.pop()
+ }
+ p.im = p.originalIM
+ p.originalIM = nil
+ return p.tok.Type == EndTagToken
+}
+
+// Section 12.2.5.4.9.
+func inTableIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
+ switch p.oe.top().DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ if strings.Trim(p.tok.Data, whitespace) == "" {
+ p.addText(p.tok.Data)
+ return true
+ }
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption:
+ p.clearStackToContext(tableScope)
+ p.afe = append(p.afe, &scopeMarker)
+ p.addElement()
+ p.im = inCaptionIM
+ return true
+ case a.Colgroup:
+ p.clearStackToContext(tableScope)
+ p.addElement()
+ p.im = inColumnGroupIM
+ return true
+ case a.Col:
+ p.parseImpliedToken(StartTagToken, a.Colgroup, a.Colgroup.String())
+ return false
+ case a.Tbody, a.Tfoot, a.Thead:
+ p.clearStackToContext(tableScope)
+ p.addElement()
+ p.im = inTableBodyIM
+ return true
+ case a.Td, a.Th, a.Tr:
+ p.parseImpliedToken(StartTagToken, a.Tbody, a.Tbody.String())
+ return false
+ case a.Table:
+ if p.popUntil(tableScope, a.Table) {
+ p.resetInsertionMode()
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Style, a.Script:
+ return inHeadIM(p)
+ case a.Input:
+ for _, t := range p.tok.Attr {
+ if t.Key == "type" && strings.ToLower(t.Val) == "hidden" {
+ p.addElement()
+ p.oe.pop()
+ return true
+ }
+ }
+ // Otherwise drop down to the default action.
+ case a.Form:
+ if p.form != nil {
+ // Ignore the token.
+ return true
+ }
+ p.addElement()
+ p.form = p.oe.pop()
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ switch p.top().DataAtom {
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ p.fosterParenting = true
+ }
+ p.addElement()
+ p.fosterParenting = false
+ p.framesetOK = false
+ p.im = inSelectInTableIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Table:
+ if p.popUntil(tableScope, a.Table) {
+ p.resetInsertionMode()
+ return true
+ }
+ // Ignore the token.
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ p.fosterParenting = true
+ defer func() { p.fosterParenting = false }()
+
+ return inBodyIM(p)
+}
+
+// Section 12.2.5.4.11.
+func inCaptionIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:
+ if p.popUntil(tableScope, a.Caption) {
+ p.clearActiveFormattingElements()
+ p.im = inTableIM
+ return false
+ } else {
+ // Ignore the token.
+ return true
+ }
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ p.im = inSelectInTableIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption:
+ if p.popUntil(tableScope, a.Caption) {
+ p.clearActiveFormattingElements()
+ p.im = inTableIM
+ }
+ return true
+ case a.Table:
+ if p.popUntil(tableScope, a.Caption) {
+ p.clearActiveFormattingElements()
+ p.im = inTableIM
+ return false
+ } else {
+ // Ignore the token.
+ return true
+ }
+ case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ // Ignore the token.
+ return true
+ }
+ }
+ return inBodyIM(p)
+}
+
+// Section 12.2.5.4.12.
+func inColumnGroupIM(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) < len(p.tok.Data) {
+ // Add the initial whitespace to the current node.
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
+ if s == "" {
+ return true
+ }
+ p.tok.Data = s
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Col:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Colgroup:
+ if p.oe.top().DataAtom != a.Html {
+ p.oe.pop()
+ p.im = inTableIM
+ }
+ return true
+ case a.Col:
+ // Ignore the token.
+ return true
+ }
+ }
+ if p.oe.top().DataAtom != a.Html {
+ p.oe.pop()
+ p.im = inTableIM
+ return false
+ }
+ return true
+}
+
+// Section 12.2.5.4.13.
+func inTableBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Tr:
+ p.clearStackToContext(tableBodyScope)
+ p.addElement()
+ p.im = inRowIM
+ return true
+ case a.Td, a.Th:
+ p.parseImpliedToken(StartTagToken, a.Tr, a.Tr.String())
+ return false
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:
+ if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
+ p.im = inTableIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Tbody, a.Tfoot, a.Thead:
+ if p.elementInScope(tableScope, p.tok.DataAtom) {
+ p.clearStackToContext(tableBodyScope)
+ p.oe.pop()
+ p.im = inTableIM
+ }
+ return true
+ case a.Table:
+ if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
+ p.im = inTableIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th, a.Tr:
+ // Ignore the token.
+ return true
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ }
+
+ return inTableIM(p)
+}
+
+// Section 12.2.5.4.14.
+func inRowIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Td, a.Th:
+ p.clearStackToContext(tableRowScope)
+ p.addElement()
+ p.afe = append(p.afe, &scopeMarker)
+ p.im = inCellIM
+ return true
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ if p.popUntil(tableScope, a.Tr) {
+ p.im = inTableBodyIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Tr:
+ if p.popUntil(tableScope, a.Tr) {
+ p.im = inTableBodyIM
+ return true
+ }
+ // Ignore the token.
+ return true
+ case a.Table:
+ if p.popUntil(tableScope, a.Tr) {
+ p.im = inTableBodyIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Tbody, a.Tfoot, a.Thead:
+ if p.elementInScope(tableScope, p.tok.DataAtom) {
+ p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th:
+ // Ignore the token.
+ return true
+ }
+ }
+
+ return inTableIM(p)
+}
+
+// Section 12.2.5.4.15.
+func inCellIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+ if p.popUntil(tableScope, a.Td, a.Th) {
+ // Close the cell and reprocess.
+ p.clearActiveFormattingElements()
+ p.im = inRowIM
+ return false
+ }
+ // Ignore the token.
+ return true
+ case a.Select:
+ p.reconstructActiveFormattingElements()
+ p.addElement()
+ p.framesetOK = false
+ p.im = inSelectInTableIM
+ return true
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Td, a.Th:
+ if !p.popUntil(tableScope, p.tok.DataAtom) {
+ // Ignore the token.
+ return true
+ }
+ p.clearActiveFormattingElements()
+ p.im = inRowIM
+ return true
+ case a.Body, a.Caption, a.Col, a.Colgroup, a.Html:
+ // Ignore the token.
+ return true
+ case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+ if !p.elementInScope(tableScope, p.tok.DataAtom) {
+ // Ignore the token.
+ return true
+ }
+ // Close the cell and reprocess.
+ p.popUntil(tableScope, a.Td, a.Th)
+ p.clearActiveFormattingElements()
+ p.im = inRowIM
+ return false
+ }
+ }
+ return inBodyIM(p)
+}
+
+// Section 12.2.5.4.16.
+func inSelectIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Option:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ p.addElement()
+ case a.Optgroup:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ if p.top().DataAtom == a.Optgroup {
+ p.oe.pop()
+ }
+ p.addElement()
+ case a.Select:
+ p.tok.Type = EndTagToken
+ return false
+ case a.Input, a.Keygen, a.Textarea:
+ if p.elementInScope(selectScope, a.Select) {
+ p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
+ return false
+ }
+ // In order to properly ignore <textarea>, we need to change the tokenizer mode.
+ p.tokenizer.NextIsNotRawText()
+ // Ignore the token.
+ return true
+ case a.Script:
+ return inHeadIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Option:
+ if p.top().DataAtom == a.Option {
+ p.oe.pop()
+ }
+ case a.Optgroup:
+ i := len(p.oe) - 1
+ if p.oe[i].DataAtom == a.Option {
+ i--
+ }
+ if p.oe[i].DataAtom == a.Optgroup {
+ p.oe = p.oe[:i]
+ }
+ case a.Select:
+ if p.popUntil(selectScope, a.Select) {
+ p.resetInsertionMode()
+ }
+ }
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case DoctypeToken:
+ // Ignore the token.
+ return true
+ }
+
+ return true
+}
+
+// Section 12.2.5.4.17.
+func inSelectInTableIM(p *parser) bool {
+ switch p.tok.Type {
+ case StartTagToken, EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:
+ if p.tok.Type == StartTagToken || p.elementInScope(tableScope, p.tok.DataAtom) {
+ p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
+ return false
+ } else {
+ // Ignore the token.
+ return true
+ }
+ }
+ }
+ return inSelectIM(p)
+}
+
+// Section 12.2.5.4.18.
+func afterBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) == 0 {
+ // It was all whitespace.
+ return inBodyIM(p)
+ }
+ case StartTagToken:
+ if p.tok.DataAtom == a.Html {
+ return inBodyIM(p)
+ }
+ case EndTagToken:
+ if p.tok.DataAtom == a.Html {
+ if !p.fragment {
+ p.im = afterAfterBodyIM
+ }
+ return true
+ }
+ case CommentToken:
+ // The comment is attached to the <html> element.
+ if len(p.oe) < 1 || p.oe[0].DataAtom != a.Html {
+ panic("html: bad parser state: <html> element not found, in the after-body insertion mode")
+ }
+ p.oe[0].AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ }
+ p.im = inBodyIM
+ return false
+}
+
+// Section 12.2.5.4.19.
+func inFramesetIM(p *parser) bool {
+ switch p.tok.Type {
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case TextToken:
+ // Ignore all text but whitespace.
+ s := strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n', '\f', '\r':
+ return c
+ }
+ return -1
+ }, p.tok.Data)
+ if s != "" {
+ p.addText(s)
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Frameset:
+ p.addElement()
+ case a.Frame:
+ p.addElement()
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ case a.Noframes:
+ return inHeadIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Frameset:
+ if p.oe.top().DataAtom != a.Html {
+ p.oe.pop()
+ if p.oe.top().DataAtom != a.Frameset {
+ p.im = afterFramesetIM
+ return true
+ }
+ }
+ }
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+// Section 12.2.5.4.20.
+func afterFramesetIM(p *parser) bool {
+ switch p.tok.Type {
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case TextToken:
+ // Ignore all text but whitespace.
+ s := strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n', '\f', '\r':
+ return c
+ }
+ return -1
+ }, p.tok.Data)
+ if s != "" {
+ p.addText(s)
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Noframes:
+ return inHeadIM(p)
+ }
+ case EndTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ p.im = afterAfterFramesetIM
+ return true
+ }
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+// Section 12.2.5.4.21.
+func afterAfterBodyIM(p *parser) bool {
+ switch p.tok.Type {
+ case ErrorToken:
+ // Stop parsing.
+ return true
+ case TextToken:
+ s := strings.TrimLeft(p.tok.Data, whitespace)
+ if len(s) == 0 {
+ // It was all whitespace.
+ return inBodyIM(p)
+ }
+ case StartTagToken:
+ if p.tok.DataAtom == a.Html {
+ return inBodyIM(p)
+ }
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ return true
+ case DoctypeToken:
+ return inBodyIM(p)
+ }
+ p.im = inBodyIM
+ return false
+}
+
+// Section 12.2.5.4.22.
+func afterAfterFramesetIM(p *parser) bool {
+ switch p.tok.Type {
+ case CommentToken:
+ p.doc.AppendChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case TextToken:
+ // Ignore all text but whitespace.
+ s := strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n', '\f', '\r':
+ return c
+ }
+ return -1
+ }, p.tok.Data)
+ if s != "" {
+ p.tok.Data = s
+ return inBodyIM(p)
+ }
+ case StartTagToken:
+ switch p.tok.DataAtom {
+ case a.Html:
+ return inBodyIM(p)
+ case a.Noframes:
+ return inHeadIM(p)
+ }
+ case DoctypeToken:
+ return inBodyIM(p)
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+const whitespaceOrNUL = whitespace + "\x00"
+
+// Section 12.2.5.5.
+func parseForeignContent(p *parser) bool {
+ switch p.tok.Type {
+ case TextToken:
+ if p.framesetOK {
+ p.framesetOK = strings.TrimLeft(p.tok.Data, whitespaceOrNUL) == ""
+ }
+ p.tok.Data = strings.Replace(p.tok.Data, "\x00", "\ufffd", -1)
+ p.addText(p.tok.Data)
+ case CommentToken:
+ p.addChild(&Node{
+ Type: CommentNode,
+ Data: p.tok.Data,
+ })
+ case StartTagToken:
+ b := breakout[p.tok.Data]
+ if p.tok.DataAtom == a.Font {
+ loop:
+ for _, attr := range p.tok.Attr {
+ switch attr.Key {
+ case "color", "face", "size":
+ b = true
+ break loop
+ }
+ }
+ }
+ if b {
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ n := p.oe[i]
+ if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
+ p.oe = p.oe[:i+1]
+ break
+ }
+ }
+ return false
+ }
+ switch p.top().Namespace {
+ case "math":
+ adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+ case "svg":
+ // Adjust SVG tag names. The tokenizer lower-cases tag names, but
+ // SVG wants e.g. "foreignObject" with a capital second "O".
+ if x := svgTagNameAdjustments[p.tok.Data]; x != "" {
+ p.tok.DataAtom = a.Lookup([]byte(x))
+ p.tok.Data = x
+ }
+ adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+ default:
+ panic("html: bad parser state: unexpected namespace")
+ }
+ adjustForeignAttributes(p.tok.Attr)
+ namespace := p.top().Namespace
+ p.addElement()
+ p.top().Namespace = namespace
+ if namespace != "" {
+ // Don't let the tokenizer go into raw text mode in foreign content
+ // (e.g. in an SVG <title> tag).
+ p.tokenizer.NextIsNotRawText()
+ }
+ if p.hasSelfClosingToken {
+ p.oe.pop()
+ p.acknowledgeSelfClosingTag()
+ }
+ case EndTagToken:
+ for i := len(p.oe) - 1; i >= 0; i-- {
+ if p.oe[i].Namespace == "" {
+ return p.im(p)
+ }
+ if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
+ p.oe = p.oe[:i]
+ break
+ }
+ }
+ return true
+ default:
+ // Ignore the token.
+ }
+ return true
+}
+
+// Section 12.2.5.
+func (p *parser) inForeignContent() bool {
+ if len(p.oe) == 0 {
+ return false
+ }
+ n := p.oe[len(p.oe)-1]
+ if n.Namespace == "" {
+ return false
+ }
+ if mathMLTextIntegrationPoint(n) {
+ if p.tok.Type == StartTagToken && p.tok.DataAtom != a.Mglyph && p.tok.DataAtom != a.Malignmark {
+ return false
+ }
+ if p.tok.Type == TextToken {
+ return false
+ }
+ }
+ if n.Namespace == "math" && n.DataAtom == a.AnnotationXml && p.tok.Type == StartTagToken && p.tok.DataAtom == a.Svg {
+ return false
+ }
+ if htmlIntegrationPoint(n) && (p.tok.Type == StartTagToken || p.tok.Type == TextToken) {
+ return false
+ }
+ if p.tok.Type == ErrorToken {
+ return false
+ }
+ return true
+}
+
+// parseImpliedToken parses a token as though it had appeared in the parser's
+// input.
+func (p *parser) parseImpliedToken(t TokenType, dataAtom a.Atom, data string) {
+ realToken, selfClosing := p.tok, p.hasSelfClosingToken
+ p.tok = Token{
+ Type: t,
+ DataAtom: dataAtom,
+ Data: data,
+ }
+ p.hasSelfClosingToken = false
+ p.parseCurrentToken()
+ p.tok, p.hasSelfClosingToken = realToken, selfClosing
+}
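+
+// Illustrative trace (not part of the upstream file): in the "before html"
+// insertion mode an end tag such as </br> is handled by synthesizing the
+// start tag the spec says is implied:
+//
+//    p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
+//
+// This creates the <html> element exactly as if "<html>" had appeared in
+// the input; the saved real token (</br>) is then restored and reprocessed
+// under the new insertion mode.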
+
+// parseCurrentToken runs the current token through the parsing routines
+// until it is consumed.
+func (p *parser) parseCurrentToken() {
+ if p.tok.Type == SelfClosingTagToken {
+ p.hasSelfClosingToken = true
+ p.tok.Type = StartTagToken
+ }
+
+ consumed := false
+ for !consumed {
+ if p.inForeignContent() {
+ consumed = parseForeignContent(p)
+ } else {
+ consumed = p.im(p)
+ }
+ }
+
+ if p.hasSelfClosingToken {
+ // This is a parse error, but ignore it.
+ p.hasSelfClosingToken = false
+ }
+}
+
+func (p *parser) parse() error {
+ // Iterate until EOF. Any other error will cause an early return.
+ var err error
+ for err != io.EOF {
+ // CDATA sections are allowed only in foreign content.
+ n := p.oe.top()
+ p.tokenizer.AllowCDATA(n != nil && n.Namespace != "")
+ // Read and parse the next token.
+ p.tokenizer.Next()
+ p.tok = p.tokenizer.Token()
+ if p.tok.Type == ErrorToken {
+ err = p.tokenizer.Err()
+ if err != nil && err != io.EOF {
+ return err
+ }
+ }
+ p.parseCurrentToken()
+ }
+ return nil
+}
+
+// Parse returns the parse tree for the HTML from the given Reader.
+// The input is assumed to be UTF-8 encoded.
+func Parse(r io.Reader) (*Node, error) {
+ p := &parser{
+ tokenizer: NewTokenizer(r),
+ doc: &Node{
+ Type: DocumentNode,
+ },
+ scripting: true,
+ framesetOK: true,
+ im: initialIM,
+ }
+ err := p.parse()
+ if err != nil {
+ return nil, err
+ }
+ return p.doc, nil
+}
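+
+// Illustrative usage (not part of the upstream file): a minimal sketch of
+// parsing a document and walking the resulting tree. It assumes the caller
+// imports fmt, strings and golang.org/x/net/html.
+//
+//    doc, err := html.Parse(strings.NewReader("<p>Hello</p>"))
+//    if err != nil {
+//        // handle the error
+//    }
+//    var walk func(n *html.Node)
+//    walk = func(n *html.Node) {
+//        if n.Type == html.ElementNode {
+//            fmt.Println(n.Data) // "html", "head", "body", "p"
+//        }
+//        for c := n.FirstChild; c != nil; c = c.NextSibling {
+//            walk(c)
+//        }
+//    }
+//    walk(doc)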
+
+// ParseFragment parses a fragment of HTML and returns the nodes that were
+// found. If the fragment is the InnerHTML for an existing element, pass that
+// element in context.
+func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
+ contextTag := ""
+ if context != nil {
+ if context.Type != ElementNode {
+ return nil, errors.New("html: ParseFragment of non-element Node")
+ }
+ // The next check isn't just context.DataAtom.String() == context.Data because
+ // it is valid to pass an element whose tag isn't a known atom. For example,
+ // DataAtom == 0 and Data == "tagfromthefuture" is perfectly consistent.
+ if context.DataAtom != a.Lookup([]byte(context.Data)) {
+ return nil, fmt.Errorf("html: inconsistent Node: DataAtom=%q, Data=%q", context.DataAtom, context.Data)
+ }
+ contextTag = context.DataAtom.String()
+ }
+ p := &parser{
+ tokenizer: NewTokenizerFragment(r, contextTag),
+ doc: &Node{
+ Type: DocumentNode,
+ },
+ scripting: true,
+ fragment: true,
+ context: context,
+ }
+
+ root := &Node{
+ Type: ElementNode,
+ DataAtom: a.Html,
+ Data: a.Html.String(),
+ }
+ p.doc.AppendChild(root)
+ p.oe = nodeStack{root}
+ p.resetInsertionMode()
+
+ for n := context; n != nil; n = n.Parent {
+ if n.Type == ElementNode && n.DataAtom == a.Form {
+ p.form = n
+ break
+ }
+ }
+
+ err := p.parse()
+ if err != nil {
+ return nil, err
+ }
+
+ parent := p.doc
+ if context != nil {
+ parent = root
+ }
+
+ var result []*Node
+ for c := parent.FirstChild; c != nil; {
+ next := c.NextSibling
+ parent.RemoveChild(c)
+ result = append(result, c)
+ c = next
+ }
+ return result, nil
+}
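+
+// Illustrative usage (not part of the upstream file): parsing an InnerHTML
+// fragment in the context of a <ul> element, so the <li> tags are treated
+// as list items rather than reparented body content. Assumes the caller
+// imports strings, golang.org/x/net/html and golang.org/x/net/html/atom.
+//
+//    ctx := &html.Node{
+//        Type:     html.ElementNode,
+//        DataAtom: atom.Ul,
+//        Data:     "ul",
+//    }
+//    nodes, err := html.ParseFragment(strings.NewReader("<li>a<li>b"), ctx)
+//    if err != nil {
+//        // handle the error
+//    }
+//    // nodes now holds two sibling <li> elements.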
diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
new file mode 100644
index 000000000..d34564f49
--- /dev/null
+++ b/vendor/golang.org/x/net/html/render.go
@@ -0,0 +1,271 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ WriteString(string) (int, error)
+}
+
+// Render renders the parse tree n to the given writer.
+//
+// Rendering is done on a 'best effort' basis: calling Parse on the output of
+// Render will always result in something similar to the original tree, but it
+// is not necessarily an exact clone unless the original tree was 'well-formed'.
+// 'Well-formed' is not easily specified; the HTML5 specification is
+// complicated.
+//
+// Calling Parse on arbitrary input typically results in a 'well-formed' parse
+// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
+// For example, in a 'well-formed' parse tree, no <a> element is a child of
+// another <a> element: parsing "<a><a>" results in two sibling elements.
+// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
+// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
+// children; the <a> is reparented to the <table>'s parent. However, calling
+// Parse on "<a><table><a>" does not return an error, but the result has an <a>
+// element with an <a> child, and is therefore not 'well-formed'.
+//
+// Programmatically constructed trees are typically also 'well-formed', but it
+// is possible to construct a tree that looks innocuous but, when rendered and
+// re-parsed, results in a different tree. A simple example is that a solitary
+// text node would become a tree containing <html>, <head> and <body> elements.
+// Another example is that the programmatic equivalent of "a<head>b</head>c"
+// becomes "<html><head><head/><body>abc</body></html>".
+func Render(w io.Writer, n *Node) error {
+ if x, ok := w.(writer); ok {
+ return render(x, n)
+ }
+ buf := bufio.NewWriter(w)
+ if err := render(buf, n); err != nil {
+ return err
+ }
+ return buf.Flush()
+}
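+
+// Illustrative usage (not part of the upstream file): a Parse/Render round
+// trip. The output is re-serialized HTML, not a byte-for-byte copy of the
+// input. Assumes the caller imports bytes, strings and golang.org/x/net/html.
+//
+//    doc, err := html.Parse(strings.NewReader("<p>a<p>b"))
+//    if err != nil {
+//        // handle the error
+//    }
+//    var buf bytes.Buffer
+//    if err := html.Render(&buf, doc); err != nil {
+//        // handle the error
+//    }
+//    // buf.String() == "<html><head></head><body><p>a</p><p>b</p></body></html>"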
+
+// plaintextAbort is returned from render1 when a <plaintext> element
+// has been rendered. No more end tags should be rendered after that.
+var plaintextAbort = errors.New("html: internal error (plaintext abort)")
+
+func render(w writer, n *Node) error {
+ err := render1(w, n)
+ if err == plaintextAbort {
+ err = nil
+ }
+ return err
+}
+
+func render1(w writer, n *Node) error {
+ // Render non-element nodes; these are the easy cases.
+ switch n.Type {
+ case ErrorNode:
+ return errors.New("html: cannot render an ErrorNode node")
+ case TextNode:
+ return escape(w, n.Data)
+ case DocumentNode:
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ return nil
+ case ElementNode:
+ // No-op.
+ case CommentNode:
+ if _, err := w.WriteString("<!--"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if _, err := w.WriteString("-->"); err != nil {
+ return err
+ }
+ return nil
+ case DoctypeNode:
+ if _, err := w.WriteString("<!DOCTYPE "); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ if n.Attr != nil {
+ var p, s string
+ for _, a := range n.Attr {
+ switch a.Key {
+ case "public":
+ p = a.Val
+ case "system":
+ s = a.Val
+ }
+ }
+ if p != "" {
+ if _, err := w.WriteString(" PUBLIC "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, p); err != nil {
+ return err
+ }
+ if s != "" {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ } else if s != "" {
+ if _, err := w.WriteString(" SYSTEM "); err != nil {
+ return err
+ }
+ if err := writeQuoted(w, s); err != nil {
+ return err
+ }
+ }
+ }
+ return w.WriteByte('>')
+ default:
+ return errors.New("html: unknown node type")
+ }
+
+ // Render the <xxx> opening tag.
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ for _, a := range n.Attr {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ if a.Namespace != "" {
+ if _, err := w.WriteString(a.Namespace); err != nil {
+ return err
+ }
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if _, err := w.WriteString(a.Key); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(`="`); err != nil {
+ return err
+ }
+ if err := escape(w, a.Val); err != nil {
+ return err
+ }
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ }
+ if voidElements[n.Data] {
+ if n.FirstChild != nil {
+ return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
+ }
+ _, err := w.WriteString("/>")
+ return err
+ }
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+
+ // Add an initial newline where there is danger of a newline being ignored.
+ if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
+ switch n.Data {
+ case "pre", "listing", "textarea":
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render any child nodes.
+ switch n.Data {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == TextNode {
+ if _, err := w.WriteString(c.Data); err != nil {
+ return err
+ }
+ } else {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+ if n.Data == "plaintext" {
+ // Don't render anything else. <plaintext> must be the
+ // last element in the file, with no closing tag.
+ return plaintextAbort
+ }
+ default:
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if err := render1(w, c); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Render the </xxx> closing tag.
+ if _, err := w.WriteString("</"); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(n.Data); err != nil {
+ return err
+ }
+ return w.WriteByte('>')
+}
+
+// writeQuoted writes s to w surrounded by quotes. Normally it will use double
+// quotes, but if s contains a double quote, it will use single quotes.
+// It is used for writing the identifiers in a doctype declaration.
+// In valid HTML, they can't contain both types of quotes.
+func writeQuoted(w writer, s string) error {
+ var q byte = '"'
+ if strings.Contains(s, `"`) {
+ q = '\''
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(s); err != nil {
+ return err
+ }
+ if err := w.WriteByte(q); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Section 12.1.2, "Elements", gives this list of void elements. Void elements
+// are those that can't have any contents.
+var voidElements = map[string]bool{
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "command": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true,
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
+}
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
new file mode 100644
index 000000000..893e272a9
--- /dev/null
+++ b/vendor/golang.org/x/net/html/token.go
@@ -0,0 +1,1219 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html/atom"
+)
+
+// A TokenType is the type of a Token.
+type TokenType uint32
+
+const (
+ // ErrorToken means that an error occurred during tokenization.
+ ErrorToken TokenType = iota
+ // TextToken means a text node.
+ TextToken
+ // A StartTagToken looks like <a>.
+ StartTagToken
+ // An EndTagToken looks like </a>.
+ EndTagToken
+ // A SelfClosingTagToken tag looks like <br/>.
+ SelfClosingTagToken
+ // A CommentToken looks like <!--x-->.
+ CommentToken
+ // A DoctypeToken looks like <!DOCTYPE x>
+ DoctypeToken
+)
+
+// ErrBufferExceeded means that the buffering limit was exceeded.
+var ErrBufferExceeded = errors.New("max buffer exceeded")
+
+// String returns a string representation of the TokenType.
+func (t TokenType) String() string {
+ switch t {
+ case ErrorToken:
+ return "Error"
+ case TextToken:
+ return "Text"
+ case StartTagToken:
+ return "StartTag"
+ case EndTagToken:
+ return "EndTag"
+ case SelfClosingTagToken:
+ return "SelfClosingTag"
+ case CommentToken:
+ return "Comment"
+ case DoctypeToken:
+ return "Doctype"
+ }
+ return "Invalid(" + strconv.Itoa(int(t)) + ")"
+}
+
+// An Attribute is an attribute namespace-key-value triple. Namespace is
+// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
+// does not contain escapable characters like '&', '<' or '>'), and Val is
+// unescaped (it looks like "a<b" rather than "a&lt;b").
+//
+// Namespace is only used by the parser, not the tokenizer.
+type Attribute struct {
+ Namespace, Key, Val string
+}
+
+// A Token consists of a TokenType and some Data (tag name for start and end
+// tags, content for text, comments and doctypes). A tag Token may also contain
+// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
+// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
+// zero if Data is not a known tag name.
+type Token struct {
+ Type TokenType
+ DataAtom atom.Atom
+ Data string
+ Attr []Attribute
+}
+
+// tagString returns a string representation of a tag Token's Data and Attr.
+func (t Token) tagString() string {
+ if len(t.Attr) == 0 {
+ return t.Data
+ }
+ buf := bytes.NewBufferString(t.Data)
+ for _, a := range t.Attr {
+ buf.WriteByte(' ')
+ buf.WriteString(a.Key)
+ buf.WriteString(`="`)
+ escape(buf, a.Val)
+ buf.WriteByte('"')
+ }
+ return buf.String()
+}
+
+// String returns a string representation of the Token.
+func (t Token) String() string {
+ switch t.Type {
+ case ErrorToken:
+ return ""
+ case TextToken:
+ return EscapeString(t.Data)
+ case StartTagToken:
+ return "<" + t.tagString() + ">"
+ case EndTagToken:
+ return "</" + t.tagString() + ">"
+ case SelfClosingTagToken:
+ return "<" + t.tagString() + "/>"
+ case CommentToken:
+ return "<!--" + t.Data + "-->"
+ case DoctypeToken:
+ return "<!DOCTYPE " + t.Data + ">"
+ }
+ return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
+}
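+
+// Illustrative usage (not part of the upstream file): driving the tokenizer
+// directly and printing each token via its String method. Assumes the caller
+// imports fmt, strings and golang.org/x/net/html.
+//
+//    z := html.NewTokenizer(strings.NewReader("<b>bold</b>"))
+//    for {
+//        if z.Next() == html.ErrorToken {
+//            break // z.Err() is io.EOF at the normal end of input
+//        }
+//        fmt.Println(z.Token()) // "<b>", then "bold", then "</b>"
+//    }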
+
+// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
+// the end is exclusive.
+type span struct {
+ start, end int
+}
+
+// A Tokenizer returns a stream of HTML Tokens.
+type Tokenizer struct {
+ // r is the source of the HTML text.
+ r io.Reader
+ // tt is the TokenType of the current token.
+ tt TokenType
+ // err is the first error encountered during tokenization. It is possible
+ // for tt != Error && err != nil to hold: this means that Next returned a
+ // valid token but the subsequent Next call will return an error token.
+ // For example, if the HTML text input was just "plain", then the first
+ // Next call would set z.err to io.EOF but return a TextToken, and all
+ // subsequent Next calls would return an ErrorToken.
+ // err is never reset. Once it becomes non-nil, it stays non-nil.
+ err error
+ // readErr is the error returned by the io.Reader r. It is separate from
+ // err because it is valid for an io.Reader to return (n int, err1 error)
+ // such that n > 0 && err1 != nil, and callers should always process the
+ // n > 0 bytes before considering the error err1.
+ readErr error
+ // buf[raw.start:raw.end] holds the raw bytes of the current token.
+ // buf[raw.end:] is buffered input that will yield future tokens.
+ raw span
+ buf []byte
+ // maxBuf limits the data buffered in buf. A value of 0 means unlimited.
+ maxBuf int
+ // buf[data.start:data.end] holds the raw bytes of the current token's data:
+ // a text token's text, a tag token's tag name, etc.
+ data span
+ // pendingAttr is the attribute key and value currently being tokenized.
+ // When complete, pendingAttr is pushed onto attr. nAttrReturned is
+ // incremented on each call to TagAttr.
+ pendingAttr [2]span
+ attr [][2]span
+ nAttrReturned int
+ // rawTag is the "script" in "</script>" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "<p>" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
+
+// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "<!-- [CDATA[foo]] -->" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an <svg> element
+// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
+// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
+// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
+// In practice, if using the tokenizer without caring whether MathML or SVG
+// CDATA is text or comments, such as tokenizing HTML to find all the anchor
+// text, it is acceptable to ignore this responsibility.
+func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
+ z.allowCDATA = allowCDATA
+}
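+
+// Illustrative usage (not part of the upstream file): when the input is
+// known to be foreign content throughout (e.g. a standalone SVG document),
+// CDATA can simply be enabled up front. svgReader is a hypothetical
+// io.Reader supplied by the caller.
+//
+//    z := html.NewTokenizer(svgReader)
+//    z.AllowCDATA(true)
+//    tt := z.Next() // <![CDATA[foo]]> now yields a TextToken with data "foo"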
+
+// NextIsNotRawText instructs the tokenizer that the next token should not be
+// considered as 'raw text'. Some elements, such as script and title elements,
+// normally require the next token after the opening tag to be 'raw text' that
+// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
+// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
+// an end tag token for "</title>". There are no distinct start tag or end tag
+// tokens for the "<b>" and "</b>".
+//
+// This tokenizer implementation will generally look for raw text at the right
+// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
+// raw text if in foreign content: <title> generally needs raw text, but a
+// <title> inside an <svg> does not. Another example is that a <textarea>
+// generally needs raw text, but a <textarea> is not allowed as an immediate
+// child of a <select>; in normal parsing, a <textarea> implies </select>, but
+// one cannot close the implicit element when parsing a <select>'s InnerHTML.
+// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
+// ness is difficult to do purely in the tokenizer, as opposed to the parser.
+// For strict compliance with the HTML5 tokenization algorithm, it is the
+// responsibility of the user of a tokenizer to call NextIsNotRawText as
+// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
+// responsibility for basic usage.
+//
+// Note that this 'raw text' concept is different from the one offered by the
+// Tokenizer.Raw method.
+func (z *Tokenizer) NextIsNotRawText() {
+ z.rawTag = ""
+}
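+
+// Illustrative usage (not part of the upstream file): a caller that tracks
+// foreign content itself can suppress raw-text mode after a <title> start
+// tag seen inside <svg>. Here inSVG and name are hypothetical caller-side
+// bookkeeping, not part of this package.
+//
+//    if tt == html.StartTagToken && inSVG && name == "title" {
+//        z.NextIsNotRawText() // tokenize the title's children as markup
+//    }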
+
+// Err returns the error associated with the most recent ErrorToken token.
+// This is typically io.EOF, meaning the end of tokenization.
+func (z *Tokenizer) Err() error {
+ if z.tt != ErrorToken {
+ return nil
+ }
+ return z.err
+}
+
+// readByte returns the next byte from the input stream, doing a buffered read
+// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
+// slice that holds all the bytes read so far for the current token.
+// It sets z.err if the underlying reader returns an error.
+// Pre-condition: z.err == nil.
+func (z *Tokenizer) readByte() byte {
+ if z.raw.end >= len(z.buf) {
+ // Our buffer is exhausted and we have to read from z.r. Check if the
+ // previous read resulted in an error.
+ if z.readErr != nil {
+ z.err = z.readErr
+ return 0
+ }
+ // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
+ // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
+ // allocate a new buffer before the copy.
+ c := cap(z.buf)
+ d := z.raw.end - z.raw.start
+ var buf1 []byte
+ if 2*d > c {
+ buf1 = make([]byte, d, 2*c)
+ } else {
+ buf1 = z.buf[:d]
+ }
+ copy(buf1, z.buf[z.raw.start:z.raw.end])
+ if x := z.raw.start; x != 0 {
+ // Adjust the data/attr spans to refer to the same contents after the copy.
+ z.data.start -= x
+ z.data.end -= x
+ z.pendingAttr[0].start -= x
+ z.pendingAttr[0].end -= x
+ z.pendingAttr[1].start -= x
+ z.pendingAttr[1].end -= x
+ for i := range z.attr {
+ z.attr[i][0].start -= x
+ z.attr[i][0].end -= x
+ z.attr[i][1].start -= x
+ z.attr[i][1].end -= x
+ }
+ }
+ z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
+ // Now that we have copied the live bytes to the start of the buffer,
+ // we read from z.r into the remainder.
+ var n int
+ n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
+ if n == 0 {
+ z.err = z.readErr
+ return 0
+ }
+ z.buf = buf1[:d+n]
+ }
+ x := z.buf[z.raw.end]
+ z.raw.end++
+ if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
+ z.err = ErrBufferExceeded
+ return 0
+ }
+ return x
+}
+
+// Buffered returns a slice containing data buffered but not yet tokenized.
+func (z *Tokenizer) Buffered() []byte {
+ return z.buf[z.raw.end:]
+}
+
+// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
+// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
+// too many times in succession.
+func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
+ for i := 0; i < 100; i++ {
+ n, err := r.Read(b)
+ if n != 0 || err != nil {
+ return n, err
+ }
+ }
+ return 0, io.ErrNoProgress
+}
+
+// skipWhiteSpace skips past any white space.
+func (z *Tokenizer) skipWhiteSpace() {
+ if z.err != nil {
+ return
+ }
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ // No-op.
+ default:
+ z.raw.end--
+ return
+ }
+ }
+}
+
+// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
+// is typically something like "script" or "textarea".
+func (z *Tokenizer) readRawOrRCDATA() {
+ if z.rawTag == "script" {
+ z.readScript()
+ z.textIsRaw = true
+ z.rawTag = ""
+ return
+ }
+loop:
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '<' {
+ continue loop
+ }
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '/' {
+ continue loop
+ }
+ if z.readRawEndTag() || z.err != nil {
+ break loop
+ }
+ }
+ z.data.end = z.raw.end
+ // A textarea's or title's RCDATA can contain escaped entities.
+ z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
+ z.rawTag = ""
+}
+
+// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
+// If it succeeds, it backs up the input position to reconsume the tag and
+// returns true. Otherwise it returns false. The opening "</" has already been
+// consumed.
+func (z *Tokenizer) readRawEndTag() bool {
+ for i := 0; i < len(z.rawTag); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ return false
+ }
+ if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
+ z.raw.end--
+ return false
+ }
+ }
+ c := z.readByte()
+ if z.err != nil {
+ return false
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ // The 3 is 2 for the leading "</" plus 1 for the trailing character c.
+ z.raw.end -= 3 + len(z.rawTag)
+ return true
+ }
+ z.raw.end--
+ return false
+}
+
+// readScript reads until the next </script> tag, following the byzantine
+// rules for escaping/hiding the closing tag.
+func (z *Tokenizer) readScript() {
+ defer func() {
+ z.data.end = z.raw.end
+ }()
+ var c byte
+
+scriptData:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '<' {
+ goto scriptDataLessThanSign
+ }
+ goto scriptData
+
+scriptDataLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '/':
+ goto scriptDataEndTagOpen
+ case '!':
+ goto scriptDataEscapeStart
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEndTagOpen:
+ if z.readRawEndTag() || z.err != nil {
+ return
+ }
+ goto scriptData
+
+scriptDataEscapeStart:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '-' {
+ goto scriptDataEscapeStartDash
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscapeStartDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '-' {
+ goto scriptDataEscapedDashDash
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscaped:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDashDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedDashDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataEscapedDashDash
+ case '<':
+ goto scriptDataEscapedLessThanSign
+ case '>':
+ goto scriptData
+ }
+ goto scriptDataEscaped
+
+scriptDataEscapedLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '/' {
+ goto scriptDataEscapedEndTagOpen
+ }
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ goto scriptDataDoubleEscapeStart
+ }
+ z.raw.end--
+ goto scriptData
+
+scriptDataEscapedEndTagOpen:
+ if z.readRawEndTag() || z.err != nil {
+ return
+ }
+ goto scriptDataEscaped
+
+scriptDataDoubleEscapeStart:
+ z.raw.end--
+ for i := 0; i < len("script"); i++ {
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c != "script"[i] && c != "SCRIPT"[i] {
+ z.raw.end--
+ goto scriptDataEscaped
+ }
+ }
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ goto scriptDataDoubleEscaped
+ }
+ z.raw.end--
+ goto scriptDataEscaped
+
+scriptDataDoubleEscaped:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDashDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedDashDash:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch c {
+ case '-':
+ goto scriptDataDoubleEscapedDashDash
+ case '<':
+ goto scriptDataDoubleEscapedLessThanSign
+ case '>':
+ goto scriptData
+ }
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapedLessThanSign:
+ c = z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c == '/' {
+ goto scriptDataDoubleEscapeEnd
+ }
+ z.raw.end--
+ goto scriptDataDoubleEscaped
+
+scriptDataDoubleEscapeEnd:
+ if z.readRawEndTag() {
+ z.raw.end += len("</script>")
+ goto scriptDataEscaped
+ }
+ if z.err != nil {
+ return
+ }
+ goto scriptDataDoubleEscaped
+}
+
+// readComment reads the next comment token starting with "<!--". The opening
+// "<!--" has already been consumed.
+func (z *Tokenizer) readComment() {
+ z.data.start = z.raw.end
+ defer func() {
+ if z.data.end < z.data.start {
+ // It's a comment with no data, like <!-->.
+ z.data.end = z.data.start
+ }
+ }()
+ for dashCount := 2; ; {
+ c := z.readByte()
+ if z.err != nil {
+ // Ignore up to two dashes at EOF.
+ if dashCount > 2 {
+ dashCount = 2
+ }
+ z.data.end = z.raw.end - dashCount
+ return
+ }
+ switch c {
+ case '-':
+ dashCount++
+ continue
+ case '>':
+ if dashCount >= 2 {
+ z.data.end = z.raw.end - len("-->")
+ return
+ }
+ case '!':
+ if dashCount >= 2 {
+ c = z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len("--!>")
+ return
+ }
+ }
+ }
+ dashCount = 0
+ }
+}
+
+// readUntilCloseAngle reads until the next ">".
+func (z *Tokenizer) readUntilCloseAngle() {
+ z.data.start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ if c == '>' {
+ z.data.end = z.raw.end - len(">")
+ return
+ }
+ }
+}
+
+// readMarkupDeclaration reads the next token starting with "<!". It might be
+// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
+// "<!a bogus comment". The opening "<!" has already been consumed.
+func (z *Tokenizer) readMarkupDeclaration() TokenType {
+ z.data.start = z.raw.end
+ var c [2]byte
+ for i := 0; i < 2; i++ {
+ c[i] = z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return CommentToken
+ }
+ }
+ if c[0] == '-' && c[1] == '-' {
+ z.readComment()
+ return CommentToken
+ }
+ z.raw.end -= 2
+ if z.readDoctype() {
+ return DoctypeToken
+ }
+ if z.allowCDATA && z.readCDATA() {
+ z.convertNUL = true
+ return TextToken
+ }
+ // It's a bogus comment.
+ z.readUntilCloseAngle()
+ return CommentToken
+}
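
This dispatch is visible from the public API. A small sketch (import path assumed as above), showing that "<!-->" is an empty comment and that "--!>" also closes a comment:

```
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<!DOCTYPE html><!-->x<!--c--!>y`))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			return
		}
		// Prints: Doctype "html", Comment "", Text "x", Comment "c", Text "y".
		fmt.Printf("%s %q\n", tt, z.Text())
	}
}
```
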
+
+// readDoctype attempts to read a doctype declaration and returns true if
+// successful. The opening "<!" has already been consumed.
+func (z *Tokenizer) readDoctype() bool {
+ const s = "DOCTYPE"
+ for i := 0; i < len(s); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return false
+ }
+ if c != s[i] && c != s[i]+('a'-'A') {
+ // Back up to read the fragment of "DOCTYPE" again.
+ z.raw.end = z.data.start
+ return false
+ }
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return true
+ }
+ z.readUntilCloseAngle()
+ return true
+}
+
+// readCDATA attempts to read a CDATA section and returns true if
+// successful. The opening "<!" has already been consumed.
+func (z *Tokenizer) readCDATA() bool {
+ const s = "[CDATA["
+ for i := 0; i < len(s); i++ {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return false
+ }
+ if c != s[i] {
+ // Back up to read the fragment of "[CDATA[" again.
+ z.raw.end = z.data.start
+ return false
+ }
+ }
+ z.data.start = z.raw.end
+ brackets := 0
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return true
+ }
+ switch c {
+ case ']':
+ brackets++
+ case '>':
+ if brackets >= 2 {
+ z.data.end = z.raw.end - len("]]>")
+ return true
+ }
+ brackets = 0
+ default:
+ brackets = 0
+ }
+ }
+}
+
+// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
+// case-insensitively matches any element of ss.
+func (z *Tokenizer) startTagIn(ss ...string) bool {
+loop:
+ for _, s := range ss {
+ if z.data.end-z.data.start != len(s) {
+ continue loop
+ }
+ for i := 0; i < len(s); i++ {
+ c := z.buf[z.data.start+i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ if c != s[i] {
+ continue loop
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// readStartTag reads the next start tag token. The opening "<a" has already
+// been consumed, where 'a' means anything in [A-Za-z].
+func (z *Tokenizer) readStartTag() TokenType {
+ z.readTag(true)
+ if z.err != nil {
+ return ErrorToken
+ }
+ // Several tags flag the tokenizer's next token as raw.
+ c, raw := z.buf[z.data.start], false
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ switch c {
+ case 'i':
+ raw = z.startTagIn("iframe")
+ case 'n':
+ raw = z.startTagIn("noembed", "noframes", "noscript")
+ case 'p':
+ raw = z.startTagIn("plaintext")
+ case 's':
+ raw = z.startTagIn("script", "style")
+ case 't':
+ raw = z.startTagIn("textarea", "title")
+ case 'x':
+ raw = z.startTagIn("xmp")
+ }
+ if raw {
+ z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
+ }
+ // Look for a self-closing token like "<br/>".
+ if z.err == nil && z.buf[z.raw.end-2] == '/' {
+ return SelfClosingTagToken
+ }
+ return StartTagToken
+}
+
+// readTag reads the next tag token and its attributes. If saveAttr, those
+// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
+// The opening "<a" or "</a" has already been consumed, where 'a' means anything
+// in [A-Za-z].
+func (z *Tokenizer) readTag(saveAttr bool) {
+ z.attr = z.attr[:0]
+ z.nAttrReturned = 0
+ // Read the tag name and attribute key/value pairs.
+ z.readTagName()
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ for {
+ c := z.readByte()
+ if z.err != nil || c == '>' {
+ break
+ }
+ z.raw.end--
+ z.readTagAttrKey()
+ z.readTagAttrVal()
+ // Save pendingAttr if saveAttr and that attribute has a non-empty key.
+ if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
+ z.attr = append(z.attr, z.pendingAttr)
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ break
+ }
+ }
+}
+
+// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
+// is positioned such that the first byte of the tag name (the "d" in "<div")
+// has already been consumed.
+func (z *Tokenizer) readTagName() {
+ z.data.start = z.raw.end - 1
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.data.end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ z.data.end = z.raw.end - 1
+ return
+ case '/', '>':
+ z.raw.end--
+ z.data.end = z.raw.end
+ return
+ }
+ }
+}
+
+// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
+// Precondition: z.err == nil.
+func (z *Tokenizer) readTagAttrKey() {
+ z.pendingAttr[0].start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[0].end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f', '/':
+ z.pendingAttr[0].end = z.raw.end - 1
+ return
+ case '=', '>':
+ z.raw.end--
+ z.pendingAttr[0].end = z.raw.end
+ return
+ }
+ }
+}
+
+// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
+func (z *Tokenizer) readTagAttrVal() {
+ z.pendingAttr[1].start = z.raw.end
+ z.pendingAttr[1].end = z.raw.end
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ c := z.readByte()
+ if z.err != nil {
+ return
+ }
+ if c != '=' {
+ z.raw.end--
+ return
+ }
+ if z.skipWhiteSpace(); z.err != nil {
+ return
+ }
+ quote := z.readByte()
+ if z.err != nil {
+ return
+ }
+ switch quote {
+ case '>':
+ z.raw.end--
+ return
+
+ case '\'', '"':
+ z.pendingAttr[1].start = z.raw.end
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ if c == quote {
+ z.pendingAttr[1].end = z.raw.end - 1
+ return
+ }
+ }
+
+ default:
+ z.pendingAttr[1].start = z.raw.end - 1
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ switch c {
+ case ' ', '\n', '\r', '\t', '\f':
+ z.pendingAttr[1].end = z.raw.end - 1
+ return
+ case '>':
+ z.raw.end--
+ z.pendingAttr[1].end = z.raw.end
+ return
+ }
+ }
+ }
+}
+
+// Next scans the next token and returns its type.
+func (z *Tokenizer) Next() TokenType {
+ z.raw.start = z.raw.end
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ if z.err != nil {
+ z.tt = ErrorToken
+ return z.tt
+ }
+ if z.rawTag != "" {
+ if z.rawTag == "plaintext" {
+ // Read everything up to EOF.
+ for z.err == nil {
+ z.readByte()
+ }
+ z.data.end = z.raw.end
+ z.textIsRaw = true
+ } else {
+ z.readRawOrRCDATA()
+ }
+ if z.data.end > z.data.start {
+ z.tt = TextToken
+ z.convertNUL = true
+ return z.tt
+ }
+ }
+ z.textIsRaw = false
+ z.convertNUL = false
+
+loop:
+ for {
+ c := z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c != '<' {
+ continue loop
+ }
+
+ // Check if the '<' we have just read is part of a tag, comment
+ // or doctype. If not, it's part of the accumulated text token.
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ var tokenType TokenType
+ switch {
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ tokenType = StartTagToken
+ case c == '/':
+ tokenType = EndTagToken
+ case c == '!' || c == '?':
+ // We use CommentToken to mean any of "<!--actual comments-->",
+ // "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
+ tokenType = CommentToken
+ default:
+ // Reconsume the current character.
+ z.raw.end--
+ continue
+ }
+
+ // We have a non-text token, but we might have accumulated some text
+ // before that. If so, we return the text first, and return the non-
+ // text token on the subsequent call to Next.
+ if x := z.raw.end - len("<a"); z.raw.start < x {
+ z.raw.end = x
+ z.data.end = x
+ z.tt = TextToken
+ return z.tt
+ }
+ switch tokenType {
+ case StartTagToken:
+ z.tt = z.readStartTag()
+ return z.tt
+ case EndTagToken:
+ c = z.readByte()
+ if z.err != nil {
+ break loop
+ }
+ if c == '>' {
+ // "</>" does not generate a token at all. Generate an empty comment
+ // to allow passthrough clients to pick up the data using Raw.
+ // Reset the tokenizer state and start again.
+ z.tt = CommentToken
+ return z.tt
+ }
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
+ z.readTag(false)
+ if z.err != nil {
+ z.tt = ErrorToken
+ } else {
+ z.tt = EndTagToken
+ }
+ return z.tt
+ }
+ z.raw.end--
+ z.readUntilCloseAngle()
+ z.tt = CommentToken
+ return z.tt
+ case CommentToken:
+ if c == '!' {
+ z.tt = z.readMarkupDeclaration()
+ return z.tt
+ }
+ z.raw.end--
+ z.readUntilCloseAngle()
+ z.tt = CommentToken
+ return z.tt
+ }
+ }
+ if z.raw.start < z.raw.end {
+ z.data.end = z.raw.end
+ z.tt = TextToken
+ return z.tt
+ }
+ z.tt = ErrorToken
+ return z.tt
+}
+
+// Raw returns the unmodified text of the current token. Calling Next, Token,
+// Text, TagName or TagAttr may change the contents of the returned slice.
+func (z *Tokenizer) Raw() []byte {
+ return z.buf[z.raw.start:z.raw.end]
+}
+
+// convertNewlines converts "\r" and "\r\n" in s to "\n".
+// The conversion happens in place, but the resulting slice may be shorter.
+func convertNewlines(s []byte) []byte {
+ for i, c := range s {
+ if c != '\r' {
+ continue
+ }
+
+ src := i + 1
+ if src >= len(s) || s[src] != '\n' {
+ s[i] = '\n'
+ continue
+ }
+
+ dst := i
+ for src < len(s) {
+ if s[src] == '\r' {
+ if src+1 < len(s) && s[src+1] == '\n' {
+ src++
+ }
+ s[dst] = '\n'
+ } else {
+ s[dst] = s[src]
+ }
+ src++
+ dst++
+ }
+ return s[:dst]
+ }
+ return s
+}
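
The effect of this normalization is visible through Text. A tiny sketch (import path assumed as above):

```
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader("a\r\nb\rc"))
	z.Next()                     // one TextToken covering the whole input
	fmt.Printf("%q\n", z.Text()) // "a\nb\nc": "\r\n" and bare "\r" become "\n"
}
```
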
+
+var (
+ nul = []byte("\x00")
+ replacement = []byte("\ufffd")
+)
+
+// Text returns the unescaped text of a text, comment or doctype token. The
+// contents of the returned slice may change on the next call to Next.
+func (z *Tokenizer) Text() []byte {
+ switch z.tt {
+ case TextToken, CommentToken, DoctypeToken:
+ s := z.buf[z.data.start:z.data.end]
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ s = convertNewlines(s)
+ if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
+ s = bytes.Replace(s, nul, replacement, -1)
+ }
+ if !z.textIsRaw {
+ s = unescape(s, false)
+ }
+ return s
+ }
+ return nil
+}
+
+// TagName returns the lower-cased name of a tag token (the `img` out of
+// `<IMG SRC="foo">`) and whether the tag has attributes.
+// The contents of the returned slice may change on the next call to Next.
+func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
+ if z.data.start < z.data.end {
+ switch z.tt {
+ case StartTagToken, EndTagToken, SelfClosingTagToken:
+ s := z.buf[z.data.start:z.data.end]
+ z.data.start = z.raw.end
+ z.data.end = z.raw.end
+ return lower(s), z.nAttrReturned < len(z.attr)
+ }
+ }
+ return nil, false
+}
+
+// TagAttr returns the lower-cased key and unescaped value of the next unparsed
+// attribute for the current tag token and whether there are more attributes.
+// The contents of the returned slices may change on the next call to Next.
+func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
+ if z.nAttrReturned < len(z.attr) {
+ switch z.tt {
+ case StartTagToken, SelfClosingTagToken:
+ x := z.attr[z.nAttrReturned]
+ z.nAttrReturned++
+ key = z.buf[x[0].start:x[0].end]
+ val = z.buf[x[1].start:x[1].end]
+ return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
+ }
+ }
+ return nil, nil, false
+}
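
TagName and TagAttr together give a zero-copy way to walk a tag. A short sketch (import path assumed as above):

```
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<IMG SRC="a.png" Alt='x &amp; y'>`))
	if z.Next() == html.StartTagToken {
		name, hasAttr := z.TagName()
		fmt.Printf("%s\n", name) // "img": the name is lower-cased
		for hasAttr {
			var k, v []byte
			k, v, hasAttr = z.TagAttr()
			fmt.Printf("  %s=%q\n", k, v) // keys lower-cased, values unescaped
		}
	}
}
```
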
+
+// Token returns the next Token. The result's Data and Attr values remain valid
+// after subsequent Next calls.
+func (z *Tokenizer) Token() Token {
+ t := Token{Type: z.tt}
+ switch z.tt {
+ case TextToken, CommentToken, DoctypeToken:
+ t.Data = string(z.Text())
+ case StartTagToken, SelfClosingTagToken, EndTagToken:
+ name, moreAttr := z.TagName()
+ for moreAttr {
+ var key, val []byte
+ key, val, moreAttr = z.TagAttr()
+ t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
+ }
+ if a := atom.Lookup(name); a != 0 {
+ t.DataAtom, t.Data = a, a.String()
+ } else {
+ t.DataAtom, t.Data = 0, string(name)
+ }
+ }
+ return t
+}
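
In contrast to Raw and Text, a Token survives further scanning, because Data and Attr are copied into fresh strings. A sketch (import path assumed as above):

```
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p>one</p><p>two</p>`))
	z.Next()
	raw := z.Raw()   // aliases the tokenizer's internal buffer
	tok := z.Token() // copies Data/Attr out of the buffer
	z.Next()         // may reuse the buffer...
	_ = raw          // ...so raw may now be stale
	fmt.Println(tok) // tok remains valid: "<p>"
}
```
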
+
+// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
+// A value of 0 means unlimited.
+func (z *Tokenizer) SetMaxBuf(n int) {
+ z.maxBuf = n
+}
+
+// NewTokenizer returns a new HTML Tokenizer for the given Reader.
+// The input is assumed to be UTF-8 encoded.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ return NewTokenizerFragment(r, "")
+}
+
+// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
+// tokenizing an existing element's InnerHTML fragment. contextTag is that
+// element's tag, such as "div" or "iframe".
+//
+// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
+// for a <p> tag or a <script> tag.
+//
+// The input is assumed to be UTF-8 encoded.
+func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
+ z := &Tokenizer{
+ r: r,
+ buf: make([]byte, 0, 4096),
+ }
+ if contextTag != "" {
+ switch s := strings.ToLower(contextTag); s {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
+ z.rawTag = s
+ }
+ }
+ return z
+}
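
The fragment-context effect described above is easy to observe. A sketch (import path assumed as above), tokenizing "a<b" with and without a <script> context:

```
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	for _, ctx := range []string{"", "script"} {
		z := html.NewTokenizerFragment(strings.NewReader("a<b"), ctx)
		for z.Next() != html.ErrorToken {
			fmt.Printf("ctx=%q: %q\n", ctx, z.Raw())
		}
	}
	// Without a context, only "a" is text (the "<b" starts an unterminated
	// tag); inside <script>, the whole "a<b" is a single raw text token.
}
```
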
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/sync/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md
new file mode 100644
index 000000000..1f8436cc9
--- /dev/null
+++ b/vendor/golang.org/x/sync/README.md
@@ -0,0 +1,18 @@
+# Go Sync
+
+This repository provides Go concurrency primitives in addition to the
+ones provided by the language and "sync" and "sync/atomic" packages.
+
+## Download/Install
+
+The easiest way to install is to run `go get -u golang.org/x/sync`. You can
+also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the sync repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the
+subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
new file mode 100644
index 000000000..ac53e733e
--- /dev/null
+++ b/vendor/golang.org/x/sync/semaphore/semaphore.go
@@ -0,0 +1,127 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semaphore provides a weighted semaphore implementation.
+package semaphore // import "golang.org/x/sync/semaphore"
+
+import (
+ "container/list"
+ "context"
+ "sync"
+)
+
+type waiter struct {
+ n int64
+ ready chan<- struct{} // Closed when semaphore acquired.
+}
+
+// NewWeighted creates a new weighted semaphore with the given
+// maximum combined weight for concurrent access.
+func NewWeighted(n int64) *Weighted {
+ w := &Weighted{size: n}
+ return w
+}
+
+// Weighted provides a way to bound concurrent access to a resource.
+// The callers can request access with a given weight.
+type Weighted struct {
+ size int64
+ cur int64
+ mu sync.Mutex
+ waiters list.List
+}
+
+// Acquire acquires the semaphore with a weight of n, blocking until resources
+// are available or ctx is done. On success, returns nil. On failure, returns
+// ctx.Err() and leaves the semaphore unchanged.
+//
+// If ctx is already done, Acquire may still succeed without blocking.
+func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+ s.mu.Lock()
+ if s.size-s.cur >= n && s.waiters.Len() == 0 {
+ s.cur += n
+ s.mu.Unlock()
+ return nil
+ }
+
+ if n > s.size {
+ // Don't make other Acquire calls block on one that's doomed to fail.
+ s.mu.Unlock()
+ <-ctx.Done()
+ return ctx.Err()
+ }
+
+ ready := make(chan struct{})
+ w := waiter{n: n, ready: ready}
+ elem := s.waiters.PushBack(w)
+ s.mu.Unlock()
+
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ s.mu.Lock()
+ select {
+ case <-ready:
+ // Acquired the semaphore after we were canceled. Rather than trying to
+ // fix up the queue, just pretend we didn't notice the cancelation.
+ err = nil
+ default:
+ s.waiters.Remove(elem)
+ }
+ s.mu.Unlock()
+ return err
+
+ case <-ready:
+ return nil
+ }
+}
+
+// TryAcquire acquires the semaphore with a weight of n without blocking.
+// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
+func (s *Weighted) TryAcquire(n int64) bool {
+ s.mu.Lock()
+ success := s.size-s.cur >= n && s.waiters.Len() == 0
+ if success {
+ s.cur += n
+ }
+ s.mu.Unlock()
+ return success
+}
+
+// Release releases the semaphore with a weight of n.
+func (s *Weighted) Release(n int64) {
+ s.mu.Lock()
+ s.cur -= n
+ if s.cur < 0 {
+ s.mu.Unlock()
+ panic("semaphore: bad release")
+ }
+ for {
+ next := s.waiters.Front()
+ if next == nil {
+ break // No more waiters blocked.
+ }
+
+ w := next.Value.(waiter)
+ if s.size-s.cur < w.n {
+ // Not enough tokens for the next waiter. We could keep going (to try to
+ // find a waiter with a smaller request), but under load that could cause
+ // starvation for large requests; instead, we leave all remaining waiters
+ // blocked.
+ //
+ // Consider a semaphore used as a read-write lock, with N tokens, N
+ // readers, and one writer. Each reader can Acquire(1) to obtain a read
+ // lock. The writer can Acquire(N) to obtain a write lock, excluding all
+ // of the readers. If we allow the readers to jump ahead in the queue,
+ // the writer will starve — there is always one token available for every
+ // reader.
+ break
+ }
+
+ s.cur += w.n
+ s.waiters.Remove(next)
+ close(w.ready)
+ }
+ s.mu.Unlock()
+}
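
A minimal usage sketch of the weighted semaphore: the bounded-worker pattern, with a final full-weight Acquire playing the writer in the read-write analogy above (the worker body is illustrative):

```
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 workers in flight

	for i := 0; i < 10; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // ctx canceled
		}
		go func(i int) {
			defer sem.Release(1)
			fmt.Println("worker", i)
		}(i)
	}

	// Acquiring the full weight waits for every worker to release,
	// which is why large requests must not be starved by small ones.
	if err := sem.Acquire(ctx, 3); err == nil {
		fmt.Println("done")
	}
}
```
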
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/README.md b/vendor/gopkg.in/cheggaaa/pb.v1/README.md
index 31babb305..1295df70e 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/README.md
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/README.md
@@ -1,7 +1,8 @@
# Terminal progress bar for Go
-Simple progress bar for console programs.
-
+Simple progress bar for console programs.
+
+Please check the new version https://github.com/cheggaaa/pb/tree/v2 (currently, it's beta)
## Installation
@@ -170,7 +171,7 @@ The result will be as follows:
```
$ go run example/multiple.go
-First 141 / 1000 [===============>---------------------------------------] 14.10 % 44s
-Second 139 / 1000 [==============>---------------------------------------] 13.90 % 44s
-Third 152 / 1000 [================>--------------------------------------] 15.20 % 40s
+First 34 / 200 [=========>---------------------------------------------] 17.00% 00m08s
+Second 42 / 200 [===========>------------------------------------------] 21.00% 00m06s
+Third 36 / 200 [=========>---------------------------------------------] 18.00% 00m08s
```
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/gopkg.in/cheggaaa/pb.v1/format.go
index d5aeff793..8bb8a7a1d 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/format.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/format.go
@@ -2,7 +2,6 @@ package pb
import (
"fmt"
- "strings"
"time"
)
@@ -11,12 +10,26 @@ type Units int
const (
// U_NO are default units, they represent a simple value and are not formatted at all.
U_NO Units = iota
- // U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...)
+ // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...)
U_BYTES
+ // U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...)
+ U_BYTES_DEC
// U_DURATION units are formatted in a human readable way (3h14m15s)
U_DURATION
)
+const (
+ KiB = 1024
+ MiB = 1048576
+ GiB = 1073741824
+ TiB = 1099511627776
+
+ KB = 1e3
+ MB = 1e6
+ GB = 1e9
+ TB = 1e12
+)
+
func Format(i int64) *formatter {
return &formatter{n: i}
}
@@ -28,11 +41,6 @@ type formatter struct {
perSec bool
}
-func (f *formatter) Value(n int64) *formatter {
- f.n = n
- return f
-}
-
func (f *formatter) To(unit Units) *formatter {
f.unit = unit
return f
@@ -52,13 +60,10 @@ func (f *formatter) String() (out string) {
switch f.unit {
case U_BYTES:
out = formatBytes(f.n)
+ case U_BYTES_DEC:
+ out = formatBytesDec(f.n)
case U_DURATION:
- d := time.Duration(f.n)
- if d > time.Hour*24 {
- out = fmt.Sprintf("%dd", d/24/time.Hour)
- d -= (d / time.Hour / 24) * (time.Hour * 24)
- }
- out = fmt.Sprintf("%s%v", out, d)
+ out = formatDuration(f.n)
default:
out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n)
}
@@ -68,20 +73,53 @@ func (f *formatter) String() (out string) {
return
}
-// Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
+// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B
func formatBytes(i int64) (result string) {
switch {
- case i > (1024 * 1024 * 1024 * 1024):
- result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
- case i > (1024 * 1024 * 1024):
- result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
- case i > (1024 * 1024):
- result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
- case i > 1024:
- result = fmt.Sprintf("%.02f KB", float64(i)/1024)
+ case i >= TiB:
+ result = fmt.Sprintf("%.02f TiB", float64(i)/TiB)
+ case i >= GiB:
+ result = fmt.Sprintf("%.02f GiB", float64(i)/GiB)
+ case i >= MiB:
+ result = fmt.Sprintf("%.02f MiB", float64(i)/MiB)
+ case i >= KiB:
+ result = fmt.Sprintf("%.02f KiB", float64(i)/KiB)
default:
result = fmt.Sprintf("%d B", i)
}
- result = strings.Trim(result, " ")
+ return
+}
+
+// Convert bytes to base-10 human readable string. Like 2 MB, 64.2 KB, 52 B
+func formatBytesDec(i int64) (result string) {
+ switch {
+ case i >= TB:
+ result = fmt.Sprintf("%.02f TB", float64(i)/TB)
+ case i >= GB:
+ result = fmt.Sprintf("%.02f GB", float64(i)/GB)
+ case i >= MB:
+ result = fmt.Sprintf("%.02f MB", float64(i)/MB)
+ case i >= KB:
+ result = fmt.Sprintf("%.02f KB", float64(i)/KB)
+ default:
+ result = fmt.Sprintf("%d B", i)
+ }
+ return
+}
+
+func formatDuration(n int64) (result string) {
+ d := time.Duration(n)
+ if d > time.Hour*24 {
+ result = fmt.Sprintf("%dd", d/24/time.Hour)
+ d -= (d / time.Hour / 24) * (time.Hour * 24)
+ }
+ if d > time.Hour {
+ result = fmt.Sprintf("%s%dh", result, d/time.Hour)
+ d -= d / time.Hour * time.Hour
+ }
+ m := d / time.Minute
+ d -= m * time.Minute
+ s := d / time.Second
+ result = fmt.Sprintf("%s%02dm%02ds", result, m, s)
return
}
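
The new units and the extracted formatDuration are exercised through the exported Format helper. A sketch, assuming the gopkg.in/cheggaaa/pb.v1 import path from the vendor tree:

```
package main

import (
	"fmt"
	"time"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	fmt.Println(pb.Format(2 * pb.MiB).To(pb.U_BYTES))                 // 2.00 MiB
	fmt.Println(pb.Format(2 * pb.MB).To(pb.U_BYTES_DEC))              // 2.00 MB
	fmt.Println(pb.Format(int64(90 * time.Second)).To(pb.U_DURATION)) // 01m30s
}
```
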
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go
index f1f15bff3..e03291b89 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go
@@ -13,7 +13,7 @@ import (
)
// Current version
-const Version = "1.0.6"
+const Version = "1.0.27"
const (
// Default refresh rate - 200ms
@@ -37,18 +37,17 @@ func New(total int) *ProgressBar {
// Create new progress bar object using int64 as total
func New64(total int64) *ProgressBar {
pb := &ProgressBar{
- Total: total,
- RefreshRate: DEFAULT_REFRESH_RATE,
- ShowPercent: true,
- ShowCounters: true,
- ShowBar: true,
- ShowTimeLeft: true,
- ShowFinalTime: true,
- Units: U_NO,
- ManualUpdate: false,
- finish: make(chan struct{}),
- currentValue: -1,
- mu: new(sync.Mutex),
+ Total: total,
+ RefreshRate: DEFAULT_REFRESH_RATE,
+ ShowPercent: true,
+ ShowCounters: true,
+ ShowBar: true,
+ ShowTimeLeft: true,
+ ShowElapsedTime: false,
+ ShowFinalTime: true,
+ Units: U_NO,
+ ManualUpdate: false,
+ finish: make(chan struct{}),
}
return pb.Format(FORMAT)
}
@@ -67,13 +66,14 @@ func StartNew(total int) *ProgressBar {
type Callback func(out string)
type ProgressBar struct {
- current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
+ current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
+ previous int64
Total int64
RefreshRate time.Duration
ShowPercent, ShowCounters bool
ShowSpeed, ShowTimeLeft, ShowBar bool
- ShowFinalTime bool
+ ShowFinalTime, ShowElapsedTime bool
Output io.Writer
Callback Callback
NotPrint bool
@@ -91,13 +91,14 @@ type ProgressBar struct {
finish chan struct{}
isFinish bool
- startTime time.Time
- startValue int64
- currentValue int64
+ startTime time.Time
+ startValue int64
+
+ changeTime time.Time
prefix, postfix string
- mu *sync.Mutex
+ mu sync.Mutex
lastPrint string
BarStart string
@@ -112,8 +113,8 @@ type ProgressBar struct {
// Start print
func (pb *ProgressBar) Start() *ProgressBar {
pb.startTime = time.Now()
- pb.startValue = pb.current
- if pb.Total == 0 {
+ pb.startValue = atomic.LoadInt64(&pb.current)
+ if atomic.LoadInt64(&pb.Total) == 0 {
pb.ShowTimeLeft = false
pb.ShowPercent = false
pb.AutoStat = false
@@ -158,12 +159,16 @@ func (pb *ProgressBar) Add64(add int64) int64 {
// Set prefix string
func (pb *ProgressBar) Prefix(prefix string) *ProgressBar {
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
pb.prefix = prefix
return pb
}
// Set postfix string
func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
pb.postfix = postfix
return pb
}
@@ -173,7 +178,7 @@ func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter
func (pb *ProgressBar) Format(format string) *ProgressBar {
var formatEntries []string
- if len(format) == 5 {
+ if utf8.RuneCountInString(format) == 5 {
formatEntries = strings.Split(format, "")
} else {
formatEntries = strings.Split(format, "\x00")
@@ -221,7 +226,9 @@ func (pb *ProgressBar) Finish() {
//Protect multiple calls
pb.finishOnce.Do(func() {
close(pb.finish)
- pb.write(atomic.LoadInt64(&pb.current))
+ pb.write(atomic.LoadInt64(&pb.Total), atomic.LoadInt64(&pb.current))
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
switch {
case pb.Output != nil:
fmt.Fprintln(pb.Output)
@@ -232,6 +239,13 @@ func (pb *ProgressBar) Finish() {
})
}
+// IsFinished reports whether the bar has finished.
+func (pb *ProgressBar) IsFinished() bool {
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
+ return pb.isFinish
+}
+
// End print and write string 'str'
func (pb *ProgressBar) FinishPrint(str string) {
pb.Finish()
@@ -262,16 +276,18 @@ func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
return &Reader{r, pb}
}
-func (pb *ProgressBar) write(current int64) {
+func (pb *ProgressBar) write(total, current int64) {
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
width := pb.GetWidth()
- var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string
+ var percentBox, countersBox, timeLeftBox, timeSpentBox, speedBox, barBox, end, out string
// percents
if pb.ShowPercent {
var percent float64
- if pb.Total > 0 {
- percent = float64(current) / (float64(pb.Total) / float64(100))
+ if total > 0 {
+ percent = float64(current) / (float64(total) / float64(100))
} else {
percent = float64(current) / float64(100)
}
@@ -281,17 +297,24 @@ func (pb *ProgressBar) write(current int64) {
// counters
if pb.ShowCounters {
current := Format(current).To(pb.Units).Width(pb.UnitsWidth)
- if pb.Total > 0 {
- total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth)
- countersBox = fmt.Sprintf(" %s / %s ", current, total)
+ if total > 0 {
+ totalS := Format(total).To(pb.Units).Width(pb.UnitsWidth)
+ countersBox = fmt.Sprintf(" %s / %s ", current, totalS)
} else {
countersBox = fmt.Sprintf(" %s / ? ", current)
}
}
// time left
- fromStart := time.Now().Sub(pb.startTime)
currentFromStart := current - pb.startValue
+ fromStart := time.Now().Sub(pb.startTime)
+ lastChangeTime := pb.changeTime
+ fromChange := lastChangeTime.Sub(pb.startTime)
+
+ if pb.ShowElapsedTime {
+ timeSpentBox = fmt.Sprintf(" %s ", (fromStart/time.Second)*time.Second)
+ }
+
select {
case <-pb.finish:
if pb.ShowFinalTime {
@@ -301,17 +324,20 @@ func (pb *ProgressBar) write(current int64) {
}
default:
if pb.ShowTimeLeft && currentFromStart > 0 {
- perEntry := fromStart / time.Duration(currentFromStart)
+ perEntry := fromChange / time.Duration(currentFromStart)
var left time.Duration
- if pb.Total > 0 {
- left = time.Duration(pb.Total-currentFromStart) * perEntry
+ if total > 0 {
+ left = time.Duration(total-currentFromStart) * perEntry
+ left -= time.Since(lastChangeTime)
left = (left / time.Second) * time.Second
} else {
left = time.Duration(currentFromStart) * perEntry
left = (left / time.Second) * time.Second
}
- timeLeft := Format(int64(left)).To(U_DURATION).String()
- timeLeftBox = fmt.Sprintf(" %s", timeLeft)
+ if left > 0 {
+ timeLeft := Format(int64(left)).To(U_DURATION).String()
+ timeLeftBox = fmt.Sprintf(" %s", timeLeft)
+ }
}
}
@@ -326,30 +352,38 @@ func (pb *ProgressBar) write(current int64) {
speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String()
}
- barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix)
+ barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeSpentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix)
// bar
if pb.ShowBar {
size := width - barWidth
if size > 0 {
- if pb.Total > 0 {
- curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size)))
- emptCount := size - curCount
+ if total > 0 {
+ curSize := int(math.Ceil((float64(current) / float64(total)) * float64(size)))
+ emptySize := size - curSize
barBox = pb.BarStart
- if emptCount < 0 {
- emptCount = 0
+ if emptySize < 0 {
+ emptySize = 0
}
- if curCount > size {
- curCount = size
+ if curSize > size {
+ curSize = size
}
- if emptCount <= 0 {
- barBox += strings.Repeat(pb.Current, curCount)
- } else if curCount > 0 {
- barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN
+
+ cursorLen := escapeAwareRuneCountInString(pb.Current)
+ if emptySize <= 0 {
+ barBox += strings.Repeat(pb.Current, curSize/cursorLen)
+ } else if curSize > 0 {
+ cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN)
+ cursorRepetitions := (curSize - cursorEndLen) / cursorLen
+ barBox += strings.Repeat(pb.Current, cursorRepetitions)
+ barBox += pb.CurrentN
}
- barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd
+
+ emptyLen := escapeAwareRuneCountInString(pb.Empty)
+ barBox += strings.Repeat(pb.Empty, emptySize/emptyLen)
+ barBox += pb.BarEnd
} else {
- barBox = pb.BarStart
pos := size - int(current)%int(size)
+ barBox = pb.BarStart
if pos-1 > 0 {
barBox += strings.Repeat(pb.Empty, pos-1)
}
@@ -363,17 +397,18 @@ func (pb *ProgressBar) write(current int64) {
}
// check len
- out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
- if escapeAwareRuneCountInString(out) < width {
- end = strings.Repeat(" ", width-utf8.RuneCountInString(out))
+ out = pb.prefix + timeSpentBox + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
+
+ if cl := escapeAwareRuneCountInString(out); cl < width {
+ end = strings.Repeat(" ", width-cl)
}
// and print!
- pb.mu.Lock()
pb.lastPrint = out + end
- pb.mu.Unlock()
+ isFinish := pb.isFinish
+
switch {
- case pb.isFinish:
+ case isFinish:
return
case pb.Output != nil:
fmt.Fprint(pb.Output, "\r"+out+end)
@@ -406,24 +441,55 @@ func (pb *ProgressBar) GetWidth() int {
// Write the current state of the progressbar
func (pb *ProgressBar) Update() {
c := atomic.LoadInt64(&pb.current)
- if pb.AlwaysUpdate || c != pb.currentValue {
- pb.write(c)
- pb.currentValue = c
+ p := atomic.LoadInt64(&pb.previous)
+ t := atomic.LoadInt64(&pb.Total)
+ if p != c {
+ pb.mu.Lock()
+ pb.changeTime = time.Now()
+ pb.mu.Unlock()
+ atomic.StoreInt64(&pb.previous, c)
}
+ pb.write(t, c)
if pb.AutoStat {
if c == 0 {
pb.startTime = time.Now()
pb.startValue = 0
- } else if c >= pb.Total && pb.isFinish != true {
+ } else if c >= t && pb.isFinish != true {
pb.Finish()
}
}
}
+// String returns the last printed bar state.
func (pb *ProgressBar) String() string {
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
return pb.lastPrint
}
+// SetTotal atomically sets a new total count.
+func (pb *ProgressBar) SetTotal(total int) *ProgressBar {
+ return pb.SetTotal64(int64(total))
+}
+
+// SetTotal64 atomically sets a new total count.
+func (pb *ProgressBar) SetTotal64(total int64) *ProgressBar {
+ atomic.StoreInt64(&pb.Total, total)
+ return pb
+}
+
+// Reset resets the bar and sets a new total count.
+// It has an effect only on a finished bar.
+func (pb *ProgressBar) Reset(total int) *ProgressBar {
+ pb.mu.Lock()
+ defer pb.mu.Unlock()
+ if pb.isFinish {
+ pb.SetTotal(total).Set(0)
+ atomic.StoreInt64(&pb.previous, 0)
+ }
+ return pb
+}
+
// Internal loop for refreshing the progressbar
func (pb *ProgressBar) refresher() {
for {
@@ -435,10 +501,3 @@ func (pb *ProgressBar) refresher() {
}
}
}
-
-type window struct {
- Row uint16
- Col uint16
- Xpixel uint16
- Ypixel uint16
-}
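
For context, the basic single-bar flow with this API; the new SetTotal/SetTotal64 can additionally adjust Total atomically while the bar runs. A sketch (counts and timings illustrative):

```
package main

import (
	"time"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	bar := pb.StartNew(200) // starts the refresher goroutine
	for i := 0; i < 200; i++ {
		bar.Increment()
		time.Sleep(time.Millisecond)
	}
	bar.Finish()
}
```
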
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go
index d85dbc3b2..17168f39a 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go
@@ -1,4 +1,4 @@
-// +build appengine
+// +build appengine js
package pb
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go
deleted file mode 100644
index c06097b43..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_nix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd dragonfly
-// +build !appengine
-
-package pb
-
-import "syscall"
-
-const sysIoctl = syscall.SYS_IOCTL
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go
deleted file mode 100644
index b7d461e17..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_solaris.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package pb
-
-const sysIoctl = 54
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go
index 72f682835..9595e8236 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go
@@ -102,7 +102,7 @@ var echoLockMutex sync.Mutex
var oldState word
-func lockEcho() (quit chan int, err error) {
+func lockEcho() (shutdownCh chan struct{}, err error) {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
if echoLocked {
@@ -124,6 +124,8 @@ func lockEcho() (quit chan int, err error) {
err = fmt.Errorf("Can't set terminal settings: %v", e)
return
}
+
+ shutdownCh = make(chan struct{})
return
}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go
index 12e6fe6e2..af4251760 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go
@@ -1,5 +1,5 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly
-// +build !appengine
+// +build !appengine !js
package pb
@@ -8,101 +8,109 @@ import (
"fmt"
"os"
"os/signal"
- "runtime"
"sync"
"syscall"
- "unsafe"
-)
-const (
- TIOCGWINSZ = 0x5413
- TIOCGWINSZ_OSX = 1074295912
+ "golang.org/x/sys/unix"
)
-var tty *os.File
-
var ErrPoolWasStarted = errors.New("Bar pool was started")
-var echoLocked bool
-var echoLockMutex sync.Mutex
+var (
+ echoLockMutex sync.Mutex
+ origTermStatePtr *unix.Termios
+ tty *os.File
+ istty bool
+)
func init() {
+ echoLockMutex.Lock()
+ defer echoLockMutex.Unlock()
+
var err error
tty, err = os.Open("/dev/tty")
+ istty = true
if err != nil {
tty = os.Stdin
+ istty = false
}
}
// terminalWidth returns width of the terminal.
func terminalWidth() (int, error) {
- w := new(window)
- tio := syscall.TIOCGWINSZ
- if runtime.GOOS == "darwin" {
- tio = TIOCGWINSZ_OSX
+ if !istty {
+ return 0, errors.New("Not Supported")
}
- res, _, err := syscall.Syscall(sysIoctl,
- tty.Fd(),
- uintptr(tio),
- uintptr(unsafe.Pointer(w)),
- )
- if int(res) == -1 {
+ echoLockMutex.Lock()
+ defer echoLockMutex.Unlock()
+
+ fd := int(tty.Fd())
+
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
return 0, err
}
- return int(w.Col), nil
-}
-var oldState syscall.Termios
+ return int(ws.Col), nil
+}
-func lockEcho() (quit chan int, err error) {
+func lockEcho() (shutdownCh chan struct{}, err error) {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
- if echoLocked {
- err = ErrPoolWasStarted
- return
- }
- echoLocked = true
+ if istty {
+ if origTermStatePtr != nil {
+ return shutdownCh, ErrPoolWasStarted
+ }
- fd := tty.Fd()
- if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
- err = fmt.Errorf("Can't get terminal settings: %v", e)
- return
- }
+ fd := int(tty.Fd())
+
+ origTermStatePtr, err = unix.IoctlGetTermios(fd, ioctlReadTermios)
+ if err != nil {
+ return nil, fmt.Errorf("Can't get terminal settings: %v", err)
+ }
+
+ oldTermios := *origTermStatePtr
+ newTermios := oldTermios
+ newTermios.Lflag &^= syscall.ECHO
+ newTermios.Lflag |= syscall.ICANON | syscall.ISIG
+ newTermios.Iflag |= syscall.ICRNL
+ if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil {
+ return nil, fmt.Errorf("Can't set terminal settings: %v", err)
+ }
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 {
- err = fmt.Errorf("Can't set terminal settings: %v", e)
- return
}
- quit = make(chan int, 1)
- go catchTerminate(quit)
+ shutdownCh = make(chan struct{})
+ go catchTerminate(shutdownCh)
return
}
-func unlockEcho() (err error) {
+func unlockEcho() error {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
- if !echoLocked {
- return
- }
- echoLocked = false
- fd := tty.Fd()
- if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
- err = fmt.Errorf("Can't set terminal settings")
+ if istty {
+ if origTermStatePtr == nil {
+ return nil
+ }
+
+ fd := int(tty.Fd())
+
+ if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil {
+ return fmt.Errorf("Can't set terminal settings: %v", err)
+ }
+
}
- return
+ origTermStatePtr = nil
+
+ return nil
}
// listen exit signals and restore terminal state
-func catchTerminate(quit chan int) {
+func catchTerminate(shutdownCh chan struct{}) {
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL)
defer signal.Stop(sig)
select {
- case <-quit:
+ case <-shutdownCh:
unlockEcho()
case <-sig:
unlockEcho()
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go
index 0b4a4afa8..392e7599c 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go
@@ -3,6 +3,7 @@
package pb
import (
+ "io"
"sync"
"time"
)
@@ -11,22 +12,36 @@ import (
// You need to call pool.Stop() after work
func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
pool = new(Pool)
- if err = pool.start(); err != nil {
+ if err = pool.Start(); err != nil {
return
}
pool.Add(pbs...)
return
}
+// NewPool initialises a pool with progress bars, but
+// doesn't start it. You need to call Start manually.
+func NewPool(pbs ...*ProgressBar) (pool *Pool) {
+ pool = new(Pool)
+ pool.Add(pbs...)
+ return
+}
+
type Pool struct {
- RefreshRate time.Duration
- bars []*ProgressBar
- quit chan int
- finishOnce sync.Once
+ Output io.Writer
+ RefreshRate time.Duration
+ bars []*ProgressBar
+ lastBarsCount int
+ shutdownCh chan struct{}
+ workerCh chan struct{}
+ m sync.Mutex
+ finishOnce sync.Once
}
// Add progress bars.
func (p *Pool) Add(pbs ...*ProgressBar) {
+ p.m.Lock()
+ defer p.m.Unlock()
for _, bar := range pbs {
bar.ManualUpdate = true
bar.NotPrint = true
@@ -35,30 +50,38 @@ func (p *Pool) Add(pbs ...*ProgressBar) {
}
}
-func (p *Pool) start() (err error) {
+func (p *Pool) Start() (err error) {
p.RefreshRate = DefaultRefreshRate
- quit, err := lockEcho()
+ p.shutdownCh, err = lockEcho()
if err != nil {
return
}
- p.quit = make(chan int)
- go p.writer(quit)
+ p.workerCh = make(chan struct{})
+ go p.writer()
return
}
-func (p *Pool) writer(finish chan int) {
+func (p *Pool) writer() {
var first = true
+ defer func() {
+ if first == false {
+ p.print(false)
+ } else {
+ p.print(true)
+ p.print(false)
+ }
+ close(p.workerCh)
+ }()
+
for {
select {
case <-time.After(p.RefreshRate):
if p.print(first) {
p.print(false)
- finish <- 1
return
}
first = false
- case <-p.quit:
- finish <- 1
+ case <-p.shutdownCh:
return
}
}
@@ -66,11 +89,16 @@ func (p *Pool) writer(finish chan int) {
// Restore terminal state and close pool
func (p *Pool) Stop() error {
- // Wait until one final refresh has passed.
- time.Sleep(p.RefreshRate)
-
p.finishOnce.Do(func() {
- close(p.quit)
+ if p.shutdownCh != nil {
+ close(p.shutdownCh)
+ }
})
+
+ // Wait for the worker to complete
+ select {
+ case <-p.workerCh:
+ }
+
return unlockEcho()
}
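
The new NewPool/Start split allows constructing the pool before any terminal output begins. A sketch (bar counts and timings illustrative):

```
package main

import (
	"sync"
	"time"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	first := pb.New(200).Prefix("First ")
	second := pb.New(200).Prefix("Second ")

	pool := pb.NewPool(first, second) // construct without starting
	if err := pool.Start(); err != nil {
		panic(err)
	}

	var wg sync.WaitGroup
	for _, bar := range []*pb.ProgressBar{first, second} {
		wg.Add(1)
		go func(bar *pb.ProgressBar) {
			defer wg.Done()
			for i := 0; i < 200; i++ {
				bar.Increment()
				time.Sleep(time.Millisecond)
			}
			bar.Finish()
		}(bar)
	}
	wg.Wait()
	pool.Stop() // waits for the writer goroutine, then restores the terminal
}
```
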
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go
index d7a5ace41..63598d378 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go
@@ -8,13 +8,18 @@ import (
)
func (p *Pool) print(first bool) bool {
+ p.m.Lock()
+ defer p.m.Unlock()
var out string
if !first {
coords, err := getCursorPos()
if err != nil {
log.Panic(err)
}
- coords.Y -= int16(len(p.bars))
+ coords.Y -= int16(p.lastBarsCount)
+ if coords.Y < 0 {
+ coords.Y = 0
+ }
coords.X = 0
err = setCursorPos(coords)
@@ -24,12 +29,17 @@ func (p *Pool) print(first bool) bool {
}
isFinished := true
for _, bar := range p.bars {
- if !bar.isFinish {
+ if !bar.IsFinished() {
isFinished = false
}
bar.Update()
out += fmt.Sprintf("\r%s\n", bar.String())
}
- fmt.Print(out)
+ if p.Output != nil {
+ fmt.Fprint(p.Output, out)
+ } else {
+ fmt.Print(out)
+ }
+ p.lastBarsCount = len(p.bars)
return isFinished
}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go
index d95b71d87..a8ae14d2f 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go
@@ -5,18 +5,25 @@ package pb
import "fmt"
func (p *Pool) print(first bool) bool {
+ p.m.Lock()
+ defer p.m.Unlock()
var out string
if !first {
- out = fmt.Sprintf("\033[%dA", len(p.bars))
+ out = fmt.Sprintf("\033[%dA", p.lastBarsCount)
}
isFinished := true
for _, bar := range p.bars {
- if !bar.isFinish {
+ if !bar.IsFinished() {
isFinished = false
}
bar.Update()
out += fmt.Sprintf("\r%s\n", bar.String())
}
- fmt.Print(out)
+ if p.Output != nil {
+ fmt.Fprint(p.Output, out)
+ } else {
+ fmt.Print(out)
+ }
+ p.lastBarsCount = len(p.bars)
return isFinished
}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go
index d52edd365..c617c55ec 100644
--- a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go
@@ -11,7 +11,7 @@ var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
func escapeAwareRuneCountInString(s string) int {
n := runewidth.StringWidth(s)
for _, sm := range ctrlFinder.FindAllString(s, -1) {
- n -= len(sm)
+ n -= runewidth.StringWidth(sm)
}
return n
}
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go
deleted file mode 100644
index ebb3fe87c..000000000
--- a/vendor/gopkg.in/cheggaaa/pb.v1/termios_nix.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build linux solaris
-// +build !appengine
-
-package pb
-
-const ioctlReadTermios = 0x5401 // syscall.TCGETS
-const ioctlWriteTermios = 0x5402 // syscall.TCSETS
diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go
new file mode 100644
index 000000000..b10f61859
--- /dev/null
+++ b/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux solaris
+// +build !appengine
+
+package pb
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS